2024-12-02 23:08:57,699 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-02 23:08:57,714 main DEBUG Took 0.013146 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 23:08:57,715 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 23:08:57,716 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 23:08:57,717 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 23:08:57,719 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,728 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 23:08:57,744 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,755 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,756 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,756 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,757 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,757 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,760 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,761 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,761 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,762 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,763 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,764 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,765 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,765 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
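Note: the LoggerConfig builders above install per-logger level overrides that child loggers inherit by name prefix. A minimal sketch, assuming the levels shown in these records, of what that means for code using the Log4j 2 API (the child logger name below, ClientCnxn, is illustrative and not taken from the log):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    public class EffectiveLevelSketch {
        public static void main(String[] args) {
            // org.apache.zookeeper is configured at ERROR above, so INFO from any logger
            // under that prefix is suppressed (ClientCnxn is an illustrative child name).
            Logger zk = LogManager.getLogger("org.apache.zookeeper.ClientCnxn");
            zk.info("dropped: effective level is ERROR");

            // org.apache.hadoop.hbase.ipc.FailedServers is configured at DEBUG above,
            // so its DEBUG output reaches the configured appender.
            Logger failedServers = LogManager.getLogger("org.apache.hadoop.hbase.ipc.FailedServers");
            failedServers.debug("emitted: effective level is DEBUG");
        }
    }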
2024-12-02 23:08:57,766 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,767 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,768 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,768 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,769 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,769 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,773 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,774 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,774 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,775 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 23:08:57,775 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,776 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 23:08:57,778 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 23:08:57,779 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 23:08:57,792 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 23:08:57,793 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
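Note: the records above show Log4j's PropertiesConfiguration parsing the test jar's log4j2.properties, one LoggerConfig per named logger plus a root logger with levelAndRefs "INFO,Console". A hedged sketch of the kind of entries that would produce them; the logger names, levels, additivity flag, and root shorthand come from the log, while the property-key layout itself is assumed for illustration:

    # Root logger at INFO, routed to the appender named Console (levelAndRefs="INFO,Console" above).
    rootLogger = INFO,Console

    # Named loggers matching the LoggerConfig builders above.
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = ERROR

    logger.hbase.name = org.apache.hadoop.hbase
    logger.hbase.level = DEBUG

    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = WARN

    logger.directory.name = org.apache.directory
    logger.directory.level = WARN
    logger.directory.additivity = false

    # The appender named Console is built a few records below as an HBaseTestAppender
    # targeting SYSTEM_ERR with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n.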
2024-12-02 23:08:57,794 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 23:08:57,795 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 23:08:57,810 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 23:08:57,814 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 23:08:57,816 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 23:08:57,816 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 23:08:57,817 main DEBUG createAppenders(={Console}) 2024-12-02 23:08:57,818 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad initialized 2024-12-02 23:08:57,818 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad 2024-12-02 23:08:57,818 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5c7933ad OK. 2024-12-02 23:08:57,819 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 23:08:57,819 main DEBUG OutputStream closed 2024-12-02 23:08:57,820 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 23:08:57,820 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 23:08:57,820 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@7c711375 OK 2024-12-02 23:08:58,040 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 23:08:58,042 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 23:08:58,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 23:08:58,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 23:08:58,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 23:08:58,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 23:08:58,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 23:08:58,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 23:08:58,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 23:08:58,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 23:08:58,049 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 23:08:58,049 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 23:08:58,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 23:08:58,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 23:08:58,051 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 23:08:58,051 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 23:08:58,052 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 23:08:58,053 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 23:08:58,062 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 23:08:58,063 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@554e218) with optional ClassLoader: null 2024-12-02 23:08:58,063 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 23:08:58,064 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@554e218] started OK. 2024-12-02T23:08:58,096 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-02 23:08:58,101 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 23:08:58,101 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02T23:08:58,476 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77 2024-12-02T23:08:58,478 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-12-02T23:08:58,480 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-12-02T23:08:58,545 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-02T23:08:58,880 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T23:08:58,929 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde, deleteOnExit=true 2024-12-02T23:08:58,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T23:08:58,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/test.cache.data in system properties and HBase conf 2024-12-02T23:08:58,931 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T23:08:58,932 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir in system properties and HBase conf 2024-12-02T23:08:58,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T23:08:58,933 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T23:08:58,934 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T23:08:59,159 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T23:08:59,164 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T23:08:59,170 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T23:08:59,171 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T23:08:59,172 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T23:08:59,174 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T23:08:59,176 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T23:08:59,177 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T23:08:59,184 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T23:08:59,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T23:08:59,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/nfs.dump.dir in system properties and HBase conf 2024-12-02T23:08:59,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir in system properties and HBase conf 2024-12-02T23:08:59,189 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T23:08:59,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T23:08:59,190 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T23:09:00,581 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T23:09:00,699 INFO [Time-limited test {}] log.Log(170): Logging initialized @4227ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T23:09:00,838 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:00,931 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:00,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:00,983 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:00,985 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T23:09:01,011 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:01,021 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36cc7298{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:01,022 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f6bc956{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:01,410 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11ff486d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-39743-hadoop-hdfs-3_4_1-tests_jar-_-any-5593995901625198449/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T23:09:01,420 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743} 2024-12-02T23:09:01,420 INFO [Time-limited test {}] server.Server(415): Started @4949ms 2024-12-02T23:09:02,039 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:02,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:02,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:02,059 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:02,059 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T23:09:02,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3178d580{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:02,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a229d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:02,213 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2da58674{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-35071-hadoop-hdfs-3_4_1-tests_jar-_-any-5435059064239344280/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T23:09:02,214 INFO [Time-limited test 
{}] server.AbstractConnector(333): Started ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071} 2024-12-02T23:09:02,214 INFO [Time-limited test {}] server.Server(415): Started @5743ms 2024-12-02T23:09:02,276 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T23:09:02,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:02,447 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:02,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:02,452 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:02,453 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T23:09:02,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f51a37c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:02,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e56013e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:02,618 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@277f3f27{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-35343-hadoop-hdfs-3_4_1-tests_jar-_-any-6463826962336447655/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T23:09:02,620 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343} 2024-12-02T23:09:02,621 INFO [Time-limited test {}] server.Server(415): Started @6149ms 2024-12-02T23:09:02,624 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T23:09:02,755 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:02,796 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:02,832 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:02,833 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:02,833 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T23:09:02,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@467ac91b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:02,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186ac0d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:02,912 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:02,913 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:02,915 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:02,922 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:03,006 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T23:09:03,007 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T23:09:03,017 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30354aa5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-44295-hadoop-hdfs-3_4_1-tests_jar-_-any-15451950354734906687/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T23:09:03,018 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:44295} 2024-12-02T23:09:03,018 INFO [Time-limited test {}] server.Server(415): Started @6547ms 2024-12-02T23:09:03,022 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T23:09:03,089 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69ebe4d304725fbc with lease ID 0x6a3c3f47eb611e8f: Processing first storage report for DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1 from datanode DatanodeRegistration(127.0.0.1:39833, datanodeUuid=46e97ca3-746a-4afe-806a-5271e62119a0, infoPort=42697, infoSecurePort=0, ipcPort=35215, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69ebe4d304725fbc with lease ID 0x6a3c3f47eb611e8f: from storage DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1 node DatanodeRegistration(127.0.0.1:39833, datanodeUuid=46e97ca3-746a-4afe-806a-5271e62119a0, infoPort=42697, infoSecurePort=0, ipcPort=35215, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x532bbe5e664c90dd with lease ID 0x6a3c3f47eb611e90: Processing first storage report for DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51 from datanode DatanodeRegistration(127.0.0.1:37623, datanodeUuid=77e55a7f-d7ba-46c0-963f-8728d50b2740, infoPort=46367, infoSecurePort=0, ipcPort=46543, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x532bbe5e664c90dd with lease ID 0x6a3c3f47eb611e90: from storage DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51 node DatanodeRegistration(127.0.0.1:37623, datanodeUuid=77e55a7f-d7ba-46c0-963f-8728d50b2740, infoPort=46367, infoSecurePort=0, ipcPort=46543, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x69ebe4d304725fbc with lease ID 0x6a3c3f47eb611e8f: Processing first storage report for DS-48e02db6-2830-4f1e-a306-3ec94b96628f from datanode DatanodeRegistration(127.0.0.1:39833, datanodeUuid=46e97ca3-746a-4afe-806a-5271e62119a0, infoPort=42697, infoSecurePort=0, ipcPort=35215, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x69ebe4d304725fbc with lease ID 
0x6a3c3f47eb611e8f: from storage DS-48e02db6-2830-4f1e-a306-3ec94b96628f node DatanodeRegistration(127.0.0.1:39833, datanodeUuid=46e97ca3-746a-4afe-806a-5271e62119a0, infoPort=42697, infoSecurePort=0, ipcPort=35215, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x532bbe5e664c90dd with lease ID 0x6a3c3f47eb611e90: Processing first storage report for DS-9a4a1224-6c3e-4c3f-bbb7-02b6d0f46ade from datanode DatanodeRegistration(127.0.0.1:37623, datanodeUuid=77e55a7f-d7ba-46c0-963f-8728d50b2740, infoPort=46367, infoSecurePort=0, ipcPort=46543, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,092 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x532bbe5e664c90dd with lease ID 0x6a3c3f47eb611e90: from storage DS-9a4a1224-6c3e-4c3f-bbb7-02b6d0f46ade node DatanodeRegistration(127.0.0.1:37623, datanodeUuid=77e55a7f-d7ba-46c0-963f-8728d50b2740, infoPort=46367, infoSecurePort=0, ipcPort=46543, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,242 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:03,243 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242/current, will proceed with Du for space computation calculation, 2024-12-02T23:09:03,306 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T23:09:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf41aa83d55045ecb with lease ID 0x6a3c3f47eb611e91: Processing first storage report for DS-3ebde639-f20d-4e6f-b72f-9130658f7991 from datanode DatanodeRegistration(127.0.0.1:45149, datanodeUuid=fbe7c001-5e56-4901-a413-ab6e43bc91a6, infoPort=46211, infoSecurePort=0, ipcPort=38073, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf41aa83d55045ecb with lease ID 0x6a3c3f47eb611e91: from storage DS-3ebde639-f20d-4e6f-b72f-9130658f7991 node DatanodeRegistration(127.0.0.1:45149, datanodeUuid=fbe7c001-5e56-4901-a413-ab6e43bc91a6, infoPort=46211, infoSecurePort=0, ipcPort=38073, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf41aa83d55045ecb with lease ID 0x6a3c3f47eb611e91: Processing first storage report for DS-72324e56-b443-4d49-91df-ebb01f04adfd from datanode DatanodeRegistration(127.0.0.1:45149, datanodeUuid=fbe7c001-5e56-4901-a413-ab6e43bc91a6, infoPort=46211, infoSecurePort=0, ipcPort=38073, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242) 2024-12-02T23:09:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf41aa83d55045ecb with lease ID 0x6a3c3f47eb611e91: from storage DS-72324e56-b443-4d49-91df-ebb01f04adfd node DatanodeRegistration(127.0.0.1:45149, datanodeUuid=fbe7c001-5e56-4901-a413-ab6e43bc91a6, infoPort=46211, infoSecurePort=0, ipcPort=38073, storageInfo=lv=-57;cid=testClusterID;nsid=774893853;c=1733180940242), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T23:09:03,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77 2024-12-02T23:09:03,703 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/zookeeper_0, clientPort=57019, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T23:09:03,724 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57019 2024-12-02T23:09:03,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:03,746 
INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:04,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741825_1001 (size=7) 2024-12-02T23:09:04,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741825_1001 (size=7) 2024-12-02T23:09:04,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741825_1001 (size=7) 2024-12-02T23:09:04,514 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 with version=8 2024-12-02T23:09:04,514 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/hbase-staging 2024-12-02T23:09:04,616 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T23:09:04,924 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9e5ab3b623d7:0 server-side Connection retries=45 2024-12-02T23:09:04,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:04,940 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:04,948 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T23:09:04,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:04,948 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T23:09:05,132 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T23:09:05,207 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T23:09:05,216 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T23:09:05,220 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T23:09:05,255 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 16378 (auto-detected) 2024-12-02T23:09:05,256 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-12-02T23:09:05,293 INFO [Time-limited test {}] ipc.NettyRpcServer(191): 
Bind to /172.17.0.3:41513 2024-12-02T23:09:05,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41513 connecting to ZooKeeper ensemble=127.0.0.1:57019 2024-12-02T23:09:05,367 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:415130x0, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T23:09:05,371 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41513-0x101503561da0000 connected 2024-12-02T23:09:05,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,441 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:09:05,465 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156, hbase.cluster.distributed=false 2024-12-02T23:09:05,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T23:09:05,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41513 2024-12-02T23:09:05,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41513 2024-12-02T23:09:05,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41513 2024-12-02T23:09:05,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41513 2024-12-02T23:09:05,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41513 2024-12-02T23:09:05,672 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9e5ab3b623d7:0 server-side Connection retries=45 2024-12-02T23:09:05,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,676 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T23:09:05,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T23:09:05,680 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T23:09:05,684 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T23:09:05,685 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:39847 2024-12-02T23:09:05,702 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39847 connecting to ZooKeeper ensemble=127.0.0.1:57019 2024-12-02T23:09:05,704 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398470x0, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T23:09:05,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398470x0, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:09:05,730 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39847-0x101503561da0001 connected 2024-12-02T23:09:05,732 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T23:09:05,740 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T23:09:05,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T23:09:05,748 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T23:09:05,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-02T23:09:05,750 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39847 2024-12-02T23:09:05,751 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39847 2024-12-02T23:09:05,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-02T23:09:05,753 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39847 2024-12-02T23:09:05,780 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9e5ab3b623d7:0 server-side Connection retries=45 2024-12-02T23:09:05,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,780 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,781 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T23:09:05,781 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,781 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T23:09:05,782 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T23:09:05,782 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T23:09:05,784 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:38049 2024-12-02T23:09:05,786 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38049 connecting to ZooKeeper ensemble=127.0.0.1:57019 2024-12-02T23:09:05,788 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,791 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:380490x0, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T23:09:05,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:380490x0, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:09:05,798 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T23:09:05,801 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38049-0x101503561da0002 connected 2024-12-02T23:09:05,802 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T23:09:05,803 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T23:09:05,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T23:09:05,808 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38049 2024-12-02T23:09:05,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): 
Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38049 2024-12-02T23:09:05,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38049 2024-12-02T23:09:05,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38049 2024-12-02T23:09:05,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38049 2024-12-02T23:09:05,829 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9e5ab3b623d7:0 server-side Connection retries=45 2024-12-02T23:09:05,829 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,830 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T23:09:05,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T23:09:05,830 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T23:09:05,830 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T23:09:05,831 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T23:09:05,832 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41311 2024-12-02T23:09:05,833 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41311 connecting to ZooKeeper ensemble=127.0.0.1:57019 2024-12-02T23:09:05,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,837 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:05,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:413110x0, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T23:09:05,844 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:413110x0, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:09:05,845 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41311-0x101503561da0003 connected 2024-12-02T23:09:05,845 INFO [Time-limited 
test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T23:09:05,846 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-12-02T23:09:05,847 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T23:09:05,852 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T23:09:05,864 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41311 2024-12-02T23:09:05,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41311 2024-12-02T23:09:05,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41311 2024-12-02T23:09:05,873 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41311 2024-12-02T23:09:05,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41311 2024-12-02T23:09:05,894 DEBUG [M:0;9e5ab3b623d7:41513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9e5ab3b623d7:41513 2024-12-02T23:09:05,895 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:05,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,910 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:05,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T23:09:05,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T23:09:05,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:05,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T23:09:05,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:05,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:05,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:05,940 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T23:09:05,942 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9e5ab3b623d7,41513,1733180944669 from backup master directory 2024-12-02T23:09:05,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:05,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T23:09:05,946 WARN [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T23:09:05,946 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:05,949 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T23:09:05,951 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T23:09:06,106 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/hbase.id] with ID: 681687ad-3859-406f-bb7d-05ef89be9a18 2024-12-02T23:09:06,108 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.tmp/hbase.id 2024-12-02T23:09:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741826_1002 (size=42) 2024-12-02T23:09:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741826_1002 (size=42) 2024-12-02T23:09:06,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741826_1002 (size=42) 2024-12-02T23:09:06,249 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.tmp/hbase.id]:[hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/hbase.id] 2024-12-02T23:09:06,304 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:06,309 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T23:09:06,333 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 
2024-12-02T23:09:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:06,349 WARN [IPC Server handler 4 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:06,349 WARN [IPC Server handler 4 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:06,349 WARN [IPC Server handler 4 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741827_1003 (size=196) 2024-12-02T23:09:06,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741827_1003 (size=196) 2024-12-02T23:09:06,396 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER 
=> 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:09:06,398 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T23:09:06,419 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T23:09:06,425 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T23:09:06,463 WARN [IPC Server handler 1 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:06,463 WARN [IPC Server handler 1 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:06,464 WARN [IPC Server handler 1 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:06,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741828_1004 (size=1189) 2024-12-02T23:09:06,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741828_1004 (size=1189) 2024-12-02T23:09:06,504 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/data/master/store 2024-12-02T23:09:06,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741829_1005 (size=34) 2024-12-02T23:09:06,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741829_1005 (size=34) 2024-12-02T23:09:06,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741829_1005 (size=34) 2024-12-02T23:09:06,561 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-02T23:09:06,565 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:06,566 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T23:09:06,566 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T23:09:06,567 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T23:09:06,569 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T23:09:06,569 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T23:09:06,570 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-02T23:09:06,571 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733180946566Disabling compacts and flushes for region at 1733180946566Disabling writes for close at 1733180946569 (+3 ms)Writing region close event to WAL at 1733180946570 (+1 ms)Closed at 1733180946570 2024-12-02T23:09:06,574 WARN [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/data/master/store/.initializing 2024-12-02T23:09:06,574 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/WALs/9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:06,586 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T23:09:06,607 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9e5ab3b623d7%2C41513%2C1733180944669, suffix=, logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/WALs/9e5ab3b623d7,41513,1733180944669, archiveDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/oldWALs, maxLogs=10 2024-12-02T23:09:06,641 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/WALs/9e5ab3b623d7,41513,1733180944669/9e5ab3b623d7%2C41513%2C1733180944669.1733180946613, exclude list is [], retry=0 2024-12-02T23:09:06,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37623,DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51,DISK] 2024-12-02T23:09:06,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39833,DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1,DISK] 2024-12-02T23:09:06,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45149,DS-3ebde639-f20d-4e6f-b72f-9130658f7991,DISK] 2024-12-02T23:09:06,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-02T23:09:06,738 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/WALs/9e5ab3b623d7,41513,1733180944669/9e5ab3b623d7%2C41513%2C1733180944669.1733180946613 2024-12-02T23:09:06,739 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46211:46211),(127.0.0.1/127.0.0.1:46367:46367),(127.0.0.1/127.0.0.1:42697:42697)] 2024-12-02T23:09:06,740 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:09:06,741 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:06,746 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,747 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,865 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T23:09:06,870 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:06,876 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:06,877 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T23:09:06,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:06,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:06,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T23:09:06,891 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:06,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:06,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T23:09:06,896 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:06,906 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:06,907 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,912 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,917 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,925 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,928 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,935 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-02T23:09:06,940 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T23:09:06,946 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:09:06,947 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70362062, jitterRate=0.04847642779350281}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T23:09:06,956 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733180946774Initializing all the Stores at 1733180946776 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180946777 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180946785 (+8 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180946786 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180946787 (+1 ms)Cleaning up temporary data from old regions at 1733180946928 (+141 ms)Region opened successfully at 1733180946955 (+27 ms) 2024-12-02T23:09:06,957 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T23:09:07,009 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69734914, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9e5ab3b623d7/172.17.0.3:0 2024-12-02T23:09:07,054 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-02T23:09:07,070 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T23:09:07,070 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T23:09:07,074 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T23:09:07,076 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T23:09:07,088 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 12 msec 2024-12-02T23:09:07,088 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T23:09:07,125 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T23:09:07,138 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T23:09:07,145 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T23:09:07,149 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T23:09:07,155 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T23:09:07,162 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T23:09:07,166 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T23:09:07,180 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T23:09:07,188 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T23:09:07,193 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T23:09:07,195 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T23:09:07,230 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T23:09:07,244 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T23:09:07,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T23:09:07,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T23:09:07,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T23:09:07,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T23:09:07,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,262 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9e5ab3b623d7,41513,1733180944669, sessionid=0x101503561da0000, setting cluster-up flag (Was=false) 2024-12-02T23:09:07,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-02T23:09:07,301 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T23:09:07,305 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:07,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,315 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:07,323 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T23:09:07,326 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:07,348 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T23:09:07,410 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-02T23:09:07,418 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:07,418 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(746): ClusterId : 681687ad-3859-406f-bb7d-05ef89be9a18 2024-12-02T23:09:07,418 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-02T23:09:07,422 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T23:09:07,430 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(746): ClusterId : 681687ad-3859-406f-bb7d-05ef89be9a18 2024-12-02T23:09:07,430 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(746): ClusterId : 681687ad-3859-406f-bb7d-05ef89be9a18 2024-12-02T23:09:07,430 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T23:09:07,430 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T23:09:07,436 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T23:09:07,436 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T23:09:07,439 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T23:09:07,439 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T23:09:07,447 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T23:09:07,448 DEBUG [RS:2;9e5ab3b623d7:41311 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1566facf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9e5ab3b623d7/172.17.0.3:0 2024-12-02T23:09:07,449 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T23:09:07,449 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T23:09:07,457 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T23:09:07,457 DEBUG [RS:0;9e5ab3b623d7:39847 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79c3dc92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9e5ab3b623d7/172.17.0.3:0 2024-12-02T23:09:07,469 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T23:09:07,470 DEBUG [RS:1;9e5ab3b623d7:38049 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e5fafea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9e5ab3b623d7/172.17.0.3:0 2024-12-02T23:09:07,492 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9e5ab3b623d7:41311 2024-12-02T23:09:07,496 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9e5ab3b623d7:39847 2024-12-02T23:09:07,498 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
2024-12-02T23:09:07,498 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T23:09:07,498 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T23:09:07,498 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T23:09:07,501 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T23:09:07,502 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T23:09:07,502 INFO [RS:2;9e5ab3b623d7:41311 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:07,502 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T23:09:07,503 INFO [RS:0;9e5ab3b623d7:39847 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:07,503 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T23:09:07,506 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=39847, startcode=1733180945622 2024-12-02T23:09:07,509 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=41311, startcode=1733180945828 2024-12-02T23:09:07,519 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9e5ab3b623d7:38049 2024-12-02T23:09:07,519 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T23:09:07,519 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T23:09:07,519 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T23:09:07,520 INFO [RS:1;9e5ab3b623d7:38049 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:07,520 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T23:09:07,521 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T23:09:07,526 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=38049, startcode=1733180945779 2024-12-02T23:09:07,535 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-02T23:09:07,536 DEBUG [RS:0;9e5ab3b623d7:39847 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T23:09:07,536 DEBUG [RS:1;9e5ab3b623d7:38049 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T23:09:07,536 DEBUG [RS:2;9e5ab3b623d7:41311 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T23:09:07,550 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T23:09:07,558 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9e5ab3b623d7,41513,1733180944669 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T23:09:07,575 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9e5ab3b623d7:0, corePoolSize=5, maxPoolSize=5 2024-12-02T23:09:07,575 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9e5ab3b623d7:0, corePoolSize=5, maxPoolSize=5 2024-12-02T23:09:07,575 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9e5ab3b623d7:0, corePoolSize=5, maxPoolSize=5 2024-12-02T23:09:07,575 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9e5ab3b623d7:0, corePoolSize=5, maxPoolSize=5 2024-12-02T23:09:07,576 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9e5ab3b623d7:0, corePoolSize=10, maxPoolSize=10 2024-12-02T23:09:07,576 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,576 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/9e5ab3b623d7:0, corePoolSize=2, maxPoolSize=2 2024-12-02T23:09:07,576 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,609 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T23:09:07,611 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43109, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T23:09:07,612 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60783, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T23:09:07,615 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T23:09:07,615 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51885, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T23:09:07,627 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T23:09:07,634 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733180977634 2024-12-02T23:09:07,636 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T23:09:07,637 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T23:09:07,637 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T23:09:07,638 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T23:09:07,643 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:07,645 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T23:09:07,646 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T23:09:07,648 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T23:09:07,649 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T23:09:07,648 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T23:09:07,669 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T23:09:07,670 WARN [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T23:09:07,670 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T23:09:07,670 WARN [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T23:09:07,670 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T23:09:07,670 WARN [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T23:09:07,674 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-02T23:09:07,687 WARN [IPC Server handler 1 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,687 WARN [IPC Server handler 1 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:07,687 WARN [IPC Server handler 1 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:07,688 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T23:09:07,690 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T23:09:07,691 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T23:09:07,703 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T23:09:07,704 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T23:09:07,720 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.large.0-1733180947706,5,FailOnTimeoutGroup] 2024-12-02T23:09:07,723 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.small.0-1733180947721,5,FailOnTimeoutGroup] 2024-12-02T23:09:07,723 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,723 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T23:09:07,725 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-02T23:09:07,725 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741831_1007 (size=1321) 2024-12-02T23:09:07,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741831_1007 (size=1321) 2024-12-02T23:09:07,778 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T23:09:07,778 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=41311, startcode=1733180945828 2024-12-02T23:09:07,778 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=39847, startcode=1733180945622 2024-12-02T23:09:07,778 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:07,778 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(2659): reportForDuty to master=9e5ab3b623d7,41513,1733180944669 with port=38049, startcode=1733180945779 2024-12-02T23:09:07,780 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:07,785 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(517): Registering regionserver=9e5ab3b623d7,39847,1733180945622 
2024-12-02T23:09:07,800 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:07,800 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33369 2024-12-02T23:09:07,800 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T23:09:07,801 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:07,801 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(517): Registering regionserver=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:07,808 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T23:09:07,809 DEBUG [RS:0;9e5ab3b623d7:39847 {}] zookeeper.ZKUtil(111): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:07,810 WARN [RS:0;9e5ab3b623d7:39847 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T23:09:07,810 INFO [RS:0;9e5ab3b623d7:39847 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T23:09:07,810 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:07,814 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:07,814 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:07,815 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33369 2024-12-02T23:09:07,815 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] master.ServerManager(517): Registering regionserver=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:07,815 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T23:09:07,819 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,819 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], 
storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,820 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:07,820 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:07,820 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,820 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,821 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:07,825 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:07,825 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 
2024-12-02T23:09:07,825 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:07,825 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:07,825 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:07,826 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33369 2024-12-02T23:09:07,826 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:07,826 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T23:09:07,833 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9e5ab3b623d7,39847,1733180945622] 2024-12-02T23:09:07,833 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9e5ab3b623d7,38049,1733180945779] 2024-12-02T23:09:07,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741832_1008 (size=32) 2024-12-02T23:09:07,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T23:09:07,833 DEBUG [RS:1;9e5ab3b623d7:38049 {}] zookeeper.ZKUtil(111): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:07,834 WARN [RS:1;9e5ab3b623d7:38049 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-02T23:09:07,834 INFO [RS:1;9e5ab3b623d7:38049 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T23:09:07,836 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:07,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741832_1008 (size=32) 2024-12-02T23:09:07,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741832_1008 (size=32) 2024-12-02T23:09:07,838 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9e5ab3b623d7,41311,1733180945828] 2024-12-02T23:09:07,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:07,842 DEBUG [RS:2;9e5ab3b623d7:41311 {}] zookeeper.ZKUtil(111): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:07,842 WARN [RS:2;9e5ab3b623d7:41311 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T23:09:07,843 INFO [RS:2;9e5ab3b623d7:41311 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T23:09:07,843 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:07,850 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T23:09:07,853 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T23:09:07,853 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:07,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:07,859 INFO 
[StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T23:09:07,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T23:09:07,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:07,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:07,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T23:09:07,875 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T23:09:07,875 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:07,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:07,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T23:09:07,880 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics 
every 5000 milliseconds 2024-12-02T23:09:07,891 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T23:09:07,891 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T23:09:07,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:07,893 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:07,893 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T23:09:07,894 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T23:09:07,900 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740 2024-12-02T23:09:07,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740 2024-12-02T23:09:07,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T23:09:07,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T23:09:07,912 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T23:09:07,916 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T23:09:07,919 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T23:09:07,923 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T23:09:07,931 INFO [RS:0;9e5ab3b623d7:39847 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T23:09:07,931 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-02T23:09:07,934 INFO [RS:2;9e5ab3b623d7:41311 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T23:09:07,934 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,941 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T23:09:07,943 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T23:09:07,946 INFO [RS:1;9e5ab3b623d7:38049 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T23:09:07,946 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,951 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T23:09:07,953 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,953 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,953 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T23:09:07,953 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,953 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,954 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,954 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,954 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=2, maxPoolSize=2 2024-12-02T23:09:07,954 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,954 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,955 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,955 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,955 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,955 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,956 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T23:09:07,956 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,956 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,956 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,956 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=2, maxPoolSize=2 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,957 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,958 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,958 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:07,958 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,958 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:07,958 DEBUG [RS:0;9e5ab3b623d7:39847 {}] executor.ExecutorService(95): Starting executor service 
name=RS_FLUSH_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:07,958 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:07,958 DEBUG [RS:2;9e5ab3b623d7:41311 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:07,967 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:09:07,968 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63029914, jitterRate=-0.0607810914516449}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T23:09:07,971 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733180947841Initializing all the Stores at 1733180947849 (+8 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180947849Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180947849Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180947850 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180947850Cleaning up temporary data from old regions at 1733180947911 (+61 ms)Region opened successfully at 1733180947971 (+60 ms) 2024-12-02T23:09:07,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T23:09:07,972 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T23:09:07,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T23:09:07,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T23:09:07,972 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region 
hbase:meta,,1.1588230740 2024-12-02T23:09:07,985 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,985 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,986 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,986 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,986 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:07,986 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,39847,1733180945622-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T23:09:07,998 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T23:09:08,002 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T23:09:08,002 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,004 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,005 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,005 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,005 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,005 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,006 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=2, maxPoolSize=2 2024-12-02T23:09:08,006 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,006 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,006 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,007 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 
2024-12-02T23:09:08,007 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,007 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9e5ab3b623d7:0, corePoolSize=1, maxPoolSize=1 2024-12-02T23:09:08,007 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:08,007 DEBUG [RS:1;9e5ab3b623d7:38049 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9e5ab3b623d7:0, corePoolSize=3, maxPoolSize=3 2024-12-02T23:09:08,008 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T23:09:08,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733180947972Disabling compacts and flushes for region at 1733180947972Disabling writes for close at 1733180947972Writing region close event to WAL at 1733180948008 (+36 ms)Closed at 1733180948008 2024-12-02T23:09:08,012 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T23:09:08,012 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T23:09:08,021 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,021 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,022 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,022 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,022 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,022 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41311,1733180945828-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T23:09:08,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T23:09:08,038 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T23:09:08,041 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,39847,1733180945622-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,041 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T23:09:08,042 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.Replication(171): 9e5ab3b623d7,39847,1733180945622 started 2024-12-02T23:09:08,043 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T23:09:08,048 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T23:09:08,050 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T23:09:08,050 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41311,1733180945828-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,050 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,050 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.Replication(171): 9e5ab3b623d7,41311,1733180945828 started 2024-12-02T23:09:08,072 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,072 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1482): Serving as 9e5ab3b623d7,39847,1733180945622, RpcServer on 9e5ab3b623d7/172.17.0.3:39847, sessionid=0x101503561da0001 2024-12-02T23:09:08,073 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T23:09:08,074 DEBUG [RS:0;9e5ab3b623d7:39847 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:08,074 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,39847,1733180945622' 2024-12-02T23:09:08,074 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T23:09:08,076 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,077 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T23:09:08,077 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-02T23:09:08,077 DEBUG [RS:0;9e5ab3b623d7:39847 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,077 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,39847,1733180945622' 2024-12-02T23:09:08,077 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T23:09:08,077 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,38049,1733180945779-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T23:09:08,078 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T23:09:08,079 DEBUG [RS:0;9e5ab3b623d7:39847 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T23:09:08,079 INFO [RS:0;9e5ab3b623d7:39847 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T23:09:08,079 INFO [RS:0;9e5ab3b623d7:39847 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T23:09:08,095 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,096 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1482): Serving as 9e5ab3b623d7,41311,1733180945828, RpcServer on 9e5ab3b623d7/172.17.0.3:41311, sessionid=0x101503561da0003 2024-12-02T23:09:08,096 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T23:09:08,096 DEBUG [RS:2;9e5ab3b623d7:41311 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:08,096 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,41311,1733180945828' 2024-12-02T23:09:08,096 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T23:09:08,097 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T23:09:08,098 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T23:09:08,098 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T23:09:08,098 DEBUG [RS:2;9e5ab3b623d7:41311 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:08,098 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,41311,1733180945828' 2024-12-02T23:09:08,098 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T23:09:08,099 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.ZKProcedureMemberRpcs(154): Looking for 
new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T23:09:08,100 DEBUG [RS:2;9e5ab3b623d7:41311 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T23:09:08,100 INFO [RS:2;9e5ab3b623d7:41311 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T23:09:08,100 INFO [RS:2;9e5ab3b623d7:41311 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T23:09:08,107 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T23:09:08,107 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,38049,1733180945779-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,107 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,108 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.Replication(171): 9e5ab3b623d7,38049,1733180945779 started 2024-12-02T23:09:08,131 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:08,131 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1482): Serving as 9e5ab3b623d7,38049,1733180945779, RpcServer on 9e5ab3b623d7/172.17.0.3:38049, sessionid=0x101503561da0002 2024-12-02T23:09:08,131 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T23:09:08,132 DEBUG [RS:1;9e5ab3b623d7:38049 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:08,132 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,38049,1733180945779' 2024-12-02T23:09:08,132 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T23:09:08,133 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T23:09:08,134 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T23:09:08,134 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T23:09:08,134 DEBUG [RS:1;9e5ab3b623d7:38049 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:08,134 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9e5ab3b623d7,38049,1733180945779' 2024-12-02T23:09:08,134 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T23:09:08,135 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T23:09:08,136 DEBUG [RS:1;9e5ab3b623d7:38049 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T23:09:08,136 INFO [RS:1;9e5ab3b623d7:38049 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 
2024-12-02T23:09:08,136 INFO [RS:1;9e5ab3b623d7:38049 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T23:09:08,187 INFO [RS:0;9e5ab3b623d7:39847 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T23:09:08,191 INFO [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9e5ab3b623d7%2C39847%2C1733180945622, suffix=, logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622, archiveDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs, maxLogs=32 2024-12-02T23:09:08,199 WARN [9e5ab3b623d7:41513 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T23:09:08,202 INFO [RS:2;9e5ab3b623d7:41311 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T23:09:08,206 INFO [RS:2;9e5ab3b623d7:41311 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9e5ab3b623d7%2C41311%2C1733180945828, suffix=, logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,41311,1733180945828, archiveDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs, maxLogs=32 2024-12-02T23:09:08,218 DEBUG [RS:0;9e5ab3b623d7:39847 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622/9e5ab3b623d7%2C39847%2C1733180945622.1733180948195, exclude list is [], retry=0 2024-12-02T23:09:08,223 WARN [IPC Server handler 4 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:08,223 WARN [IPC Server handler 4 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:08,223 WARN [IPC Server handler 4 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:08,228 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37623,DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51,DISK] 2024-12-02T23:09:08,229 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39833,DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1,DISK] 2024-12-02T23:09:08,231 DEBUG [RS:2;9e5ab3b623d7:41311 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,41311,1733180945828/9e5ab3b623d7%2C41311%2C1733180945828.1733180948209, exclude list is [], retry=0 2024-12-02T23:09:08,236 WARN [IPC Server handler 2 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:08,236 WARN [IPC Server handler 2 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:08,236 WARN [IPC Server handler 2 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:08,237 INFO [RS:1;9e5ab3b623d7:38049 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T23:09:08,241 INFO [RS:1;9e5ab3b623d7:38049 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9e5ab3b623d7%2C38049%2C1733180945779, suffix=, logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,38049,1733180945779, archiveDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs, maxLogs=32 2024-12-02T23:09:08,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39833,DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1,DISK] 2024-12-02T23:09:08,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37623,DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51,DISK] 2024-12-02T23:09:08,264 INFO [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622/9e5ab3b623d7%2C39847%2C1733180945622.1733180948195 2024-12-02T23:09:08,265 DEBUG [RS:0;9e5ab3b623d7:39847 {}] 
wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46367:46367),(127.0.0.1/127.0.0.1:42697:42697)] 2024-12-02T23:09:08,298 INFO [RS:2;9e5ab3b623d7:41311 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,41311,1733180945828/9e5ab3b623d7%2C41311%2C1733180945828.1733180948209 2024-12-02T23:09:08,302 DEBUG [RS:2;9e5ab3b623d7:41311 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42697:42697),(127.0.0.1/127.0.0.1:46367:46367)] 2024-12-02T23:09:08,308 DEBUG [RS:1;9e5ab3b623d7:38049 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,38049,1733180945779/9e5ab3b623d7%2C38049%2C1733180945779.1733180948243, exclude list is [], retry=0 2024-12-02T23:09:08,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45149,DS-3ebde639-f20d-4e6f-b72f-9130658f7991,DISK] 2024-12-02T23:09:08,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39833,DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1,DISK] 2024-12-02T23:09:08,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37623,DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51,DISK] 2024-12-02T23:09:08,338 INFO [RS:1;9e5ab3b623d7:38049 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,38049,1733180945779/9e5ab3b623d7%2C38049%2C1733180945779.1733180948243 2024-12-02T23:09:08,344 DEBUG [RS:1;9e5ab3b623d7:38049 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46211:46211),(127.0.0.1/127.0.0.1:42697:42697),(127.0.0.1/127.0.0.1:46367:46367)] 2024-12-02T23:09:08,452 DEBUG [9e5ab3b623d7:41513 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T23:09:08,462 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:09:08,481 INFO [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:09:08,481 INFO [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 
2024-12-02T23:09:08,481 INFO [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:09:08,481 DEBUG [9e5ab3b623d7:41513 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:09:08,491 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:08,500 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9e5ab3b623d7,39847,1733180945622, state=OPENING 2024-12-02T23:09:08,505 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T23:09:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:08,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,508 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,511 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T23:09:08,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:09:08,696 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:08,704 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:50741, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:08,723 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T23:09:08,724 INFO 
[RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T23:09:08,724 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T23:09:08,728 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9e5ab3b623d7%2C39847%2C1733180945622.meta, suffix=.meta, logDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622, archiveDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs, maxLogs=32 2024-12-02T23:09:08,750 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622/9e5ab3b623d7%2C39847%2C1733180945622.meta.1733180948731.meta, exclude list is [], retry=0 2024-12-02T23:09:08,756 WARN [IPC Server handler 2 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-02T23:09:08,756 WARN [IPC Server handler 2 on default port 33369 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-02T23:09:08,756 WARN [IPC Server handler 2 on default port 33369 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-02T23:09:08,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37623,DS-c00f0d44-9ed1-46ca-a8d0-e778f8c47b51,DISK] 2024-12-02T23:09:08,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39833,DS-fa88d329-7c97-4bbe-a2d8-6d912b0accb1,DISK] 2024-12-02T23:09:08,779 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,39847,1733180945622/9e5ab3b623d7%2C39847%2C1733180945622.meta.1733180948731.meta 2024-12-02T23:09:08,780 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42697:42697),(127.0.0.1/127.0.0.1:46367:46367)] 2024-12-02T23:09:08,780 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:09:08,782 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-02T23:09:08,783 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:08,784 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T23:09:08,787 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T23:09:08,788 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
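Annotation: the RegionCoprocessorHost entries just above show MultiRowMutationEndpoint being loaded "from HTD of hbase:meta", i.e. from the table descriptor rather than from a jar path. A minimal sketch of how a coprocessor class name ends up in a table descriptor through the public client API follows; the table name "demo" and family "cf" are hypothetical, and only the coprocessor class name is taken from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorOnDescriptorSketch {
  public static void main(String[] args) throws Exception {
    // "demo" and "cf" are hypothetical names used only for this illustration.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        // A table-level coprocessor stored in the table descriptor (the "HTD"
        // that the RegionCoprocessorHost log entry refers to).
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .build();
    System.out.println(td);
  }
}
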
2024-12-02T23:09:08,798 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T23:09:08,799 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:08,799 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T23:09:08,799 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T23:09:08,803 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T23:09:08,805 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T23:09:08,805 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:08,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:08,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T23:09:08,809 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T23:09:08,809 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:08,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:08,810 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T23:09:08,811 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T23:09:08,811 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:08,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T23:09:08,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T23:09:08,814 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T23:09:08,814 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:08,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
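Annotation: the CompactionConfiguration lines above print the effective compaction settings for each column family of hbase:meta (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5). A minimal sketch of the configuration keys that normally feed these values, assuming the standard HBase property names; the numbers simply restate the logged defaults rather than recommend changes.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed to be the keys behind the CompactionConfiguration output above;
    // the values restate what the log prints as defaults.
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio 5.000000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period, ms (7 days)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter 0.500000
    System.out.println("compaction ratio = " + conf.get("hbase.hstore.compaction.ratio"));
  }
}
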
2024-12-02T23:09:08,815 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T23:09:08,816 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740 2024-12-02T23:09:08,819 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740 2024-12-02T23:09:08,822 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T23:09:08,823 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T23:09:08,824 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T23:09:08,828 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T23:09:08,830 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62954752, jitterRate=-0.061901092529296875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T23:09:08,830 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T23:09:08,834 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733180948800Writing region info on filesystem at 1733180948800Initializing all the Stores at 1733180948803 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180948803Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180948803Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180948803Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180948803Cleaning up temporary data from old regions at 1733180948823 (+20 ms)Running coprocessor post-open hooks at 1733180948830 (+7 ms)Region opened successfully at 1733180948834 (+4 ms) 2024-12-02T23:09:08,843 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733180948684 2024-12-02T23:09:08,859 DEBUG [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T23:09:08,860 INFO [RS_OPEN_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T23:09:08,862 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:08,866 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9e5ab3b623d7,39847,1733180945622, state=OPEN 2024-12-02T23:09:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T23:09:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T23:09:08,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T23:09:08,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,869 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,870 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:09:08,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T23:09:08,873 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T23:09:08,879 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T23:09:08,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9e5ab3b623d7,39847,1733180945622 in 357 msec 2024-12-02T23:09:08,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T23:09:08,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 860 msec 2024-12-02T23:09:08,893 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T23:09:08,894 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T23:09:08,933 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:08,935 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:08,969 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:08,987 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44867, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:09,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.5960 sec 2024-12-02T23:09:09,036 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733180949035, completionTime=-1 2024-12-02T23:09:09,040 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T23:09:09,040 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-02T23:09:09,230 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T23:09:09,230 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733181009230 2024-12-02T23:09:09,230 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733181069230 2024-12-02T23:09:09,230 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 190 msec 2024-12-02T23:09:09,232 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:09:09,259 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,259 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,259 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,266 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9e5ab3b623d7:41513, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,274 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,283 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T23:09:09,293 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T23:09:09,347 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.400sec 2024-12-02T23:09:09,350 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T23:09:09,352 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T23:09:09,353 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T23:09:09,354 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-02T23:09:09,354 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T23:09:09,355 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T23:09:09,356 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T23:09:09,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc5704d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:09,399 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T23:09:09,400 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:09,403 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1816a7e5 2024-12-02T23:09:09,416 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T23:09:09,416 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T23:09:09,426 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T23:09:09,430 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52311, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T23:09:09,440 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:09,448 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T23:09:09,478 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:09,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-02T23:09:09,501 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:09:09,502 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:09,504 INFO 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-02T23:09:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T23:09:09,515 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:09:09,544 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:09,549 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:09,552 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:09,553 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c5b714d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:09,553 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:09,560 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:09,575 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:09,576 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:09,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49feebc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:09,583 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:09,597 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:09,598 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:09,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741837_1013 (size=349) 2024-12-02T23:09:09,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741837_1013 (size=349) 2024-12-02T23:09:09,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741837_1013 (size=349) 2024-12-02T23:09:09,618 INFO 
[RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a8d1ca9c63bdfff14551a9b4d2eb111b, NAME => 'hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:09,619 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:09,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-12-02T23:09:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/test.cache.data in system properties and HBase conf 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir in system properties and HBase conf 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T23:09:09,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T23:09:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/nfs.dump.dir in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T23:09:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T23:09:09,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741838_1014 (size=36) 2024-12-02T23:09:09,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:09,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing a8d1ca9c63bdfff14551a9b4d2eb111b, disabling compactions & flushes 2024-12-02T23:09:09,743 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:09,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:09,743 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. after waiting 0 ms 2024-12-02T23:09:09,744 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:09,744 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:09,744 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for a8d1ca9c63bdfff14551a9b4d2eb111b: Waiting for close lock at 1733180949743Disabling compacts and flushes for region at 1733180949743Disabling writes for close at 1733180949744 (+1 ms)Writing region close event to WAL at 1733180949744Closed at 1733180949744 2024-12-02T23:09:09,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741838_1014 (size=36) 2024-12-02T23:09:09,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741838_1014 (size=36) 2024-12-02T23:09:09,748 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:09:09,761 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733180949750"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180949750"}]},"ts":"1733180949750"} 2024-12-02T23:09:09,781 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
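The block of hbase.HBaseTestingUtil(751) records above shows the test redirecting every Hadoop/YARN/HDFS scratch directory into the per-test data directory before the mini MapReduce cluster comes up. For orientation only (this is not the test's source code, and the exact HBaseTestingUtil method names are assumed to match the usual testing-util API), the bootstrap that produces this kind of output looks roughly like:

```java
// Illustrative sketch only; not the test code behind this log.
// Assumes the HBaseTestingUtil bootstrap methods keep their usual names
// (startMiniCluster / startMiniMapReduceCluster / shutdownMiniCluster).
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterBootstrapSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Brings up mini ZooKeeper, HDFS (three datanodes in this log) and an
    // HBase master plus region servers.
    util.startMiniCluster(3);
    // Starts the mini YARN/MapReduce cluster; this is the step that rewrites
    // hadoop.tmp.dir, yarn.*, dfs.* and mapreduce.* paths into the test-data dir.
    util.startMiniMapReduceCluster();
    try {
      // ... run MapReduce-backed export/snapshot tests here ...
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
}
```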
2024-12-02T23:09:09,785 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:09:09,795 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180949785"}]},"ts":"1733180949785"} 2024-12-02T23:09:09,846 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-02T23:09:09,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T23:09:09,847 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:09:09,854 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:09:09,854 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:09:09,854 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:09:09,854 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:09:09,856 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a8d1ca9c63bdfff14551a9b4d2eb111b, ASSIGN}] 2024-12-02T23:09:09,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741839_1015 (size=592039) 2024-12-02T23:09:09,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741839_1015 (size=592039) 2024-12-02T23:09:09,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741839_1015 (size=592039) 2024-12-02T23:09:09,866 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a8d1ca9c63bdfff14551a9b4d2eb111b, ASSIGN 2024-12-02T23:09:09,882 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a8d1ca9c63bdfff14551a9b4d2eb111b, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:09:10,036 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 1 
regions. 1 retained the pre-restart assignment. 2024-12-02T23:09:10,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T23:09:10,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T23:09:10,042 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a8d1ca9c63bdfff14551a9b4d2eb111b, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:10,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T23:09:10,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=a8d1ca9c63bdfff14551a9b4d2eb111b, ASSIGN because future has completed 2024-12-02T23:09:10,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8d1ca9c63bdfff14551a9b4d2eb111b, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:10,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T23:09:10,323 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:10,349 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40011, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:10,386 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:10,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a8d1ca9c63bdfff14551a9b4d2eb111b, NAME => 'hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:09:10,387 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. service=AccessControlService 2024-12-02T23:09:10,388 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
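The open of hbase:acl above registers the AccessControlService coprocessor endpoint and loads org.apache.hadoop.hbase.security.access.AccessController, consistent with the TestMobSecureExportSnapshot test named later in this log. As a hedged sketch, the stock configuration keys that cause that coprocessor to be installed are shown below; the key names are the standard HBase ones, while wiring them into this particular test's setup is an assumption (later records also show test-only observers such as SecureTestUtil$MasterSyncObserver registered alongside it).

```java
// Hedged sketch of the security configuration that loads the AccessController
// coprocessor (and hence the hbase:acl table); values are illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AccessControllerConfSketch {
  static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.security.authorization", "true");
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}
```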
2024-12-02T23:09:10,389 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,389 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:10,389 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,389 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,409 INFO [StoreOpener-a8d1ca9c63bdfff14551a9b4d2eb111b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,417 INFO [StoreOpener-a8d1ca9c63bdfff14551a9b4d2eb111b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a8d1ca9c63bdfff14551a9b4d2eb111b columnFamilyName l 2024-12-02T23:09:10,417 DEBUG [StoreOpener-a8d1ca9c63bdfff14551a9b4d2eb111b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:10,419 INFO [StoreOpener-a8d1ca9c63bdfff14551a9b4d2eb111b-1 {}] regionserver.HStore(327): Store=a8d1ca9c63bdfff14551a9b4d2eb111b/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:10,419 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,421 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,422 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,423 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,423 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,429 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,447 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:09:10,448 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened a8d1ca9c63bdfff14551a9b4d2eb111b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60022791, jitterRate=-0.10559071600437164}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:10,449 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:09:10,453 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a8d1ca9c63bdfff14551a9b4d2eb111b: Running coprocessor pre-open hook at 1733180950389Writing region info on filesystem at 1733180950390 (+1 ms)Initializing all the Stores at 1733180950400 (+10 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733180950400Cleaning up temporary data from old regions at 1733180950423 (+23 ms)Running coprocessor post-open hooks at 1733180950449 (+26 ms)Region opened successfully at 1733180950452 (+3 ms) 2024-12-02T23:09:10,457 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., pid=6, masterSystemTime=1733180950323 2024-12-02T23:09:10,463 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:09:10,463 INFO [RS_OPEN_PRIORITY_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 
2024-12-02T23:09:10,464 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a8d1ca9c63bdfff14551a9b4d2eb111b, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:10,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a8d1ca9c63bdfff14551a9b4d2eb111b, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:10,481 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T23:09:10,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a8d1ca9c63bdfff14551a9b4d2eb111b, server=9e5ab3b623d7,41311,1733180945828 in 390 msec 2024-12-02T23:09:10,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T23:09:10,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=a8d1ca9c63bdfff14551a9b4d2eb111b, ASSIGN in 626 msec 2024-12-02T23:09:10,492 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:09:10,493 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180950492"}]},"ts":"1733180950492"} 2024-12-02T23:09:10,498 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-02T23:09:10,500 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:09:10,506 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.0420 sec 2024-12-02T23:09:10,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T23:09:10,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-02T23:09:10,677 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T23:09:10,679 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T23:09:10,679 INFO [master/9e5ab3b623d7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9e5ab3b623d7,41513,1733180944669-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
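pid=4 completes here: the CreateTableProcedure for hbase:acl finishes in 1.0420 sec and the client-side RawAsyncHBaseAdmin reports the CREATE operation as completed after repeatedly asking the master whether the procedure is done (the recurring "Checking to see if procedure is done pid=4" records). For orientation only, a generic Admin-API call that drives a CreateTableProcedure of this shape looks like the sketch below; the table and family names are copied from the logged descriptor, everything else is illustrative (hbase:acl itself is normally created by the security setup rather than by user code).

```java
// Hedged sketch of a client-side createTable call; not the code behind pid=4.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAclTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single in-memory family 'l' with one version, matching the descriptor
      // printed by HRegion(7572) earlier in the log.
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "acl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("l"))
                  .setMaxVersions(1)
                  .setInMemory(true)
                  .setBlocksize(8 * 1024)
                  .build());
      // createTable() submits the procedure on the master and blocks while the
      // client polls "is procedure done", as seen in the log.
      admin.createTable(table.build());
    }
  }
}
```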
2024-12-02T23:09:12,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741827_1003 (size=196) 2024-12-02T23:09:12,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741828_1004 (size=1189) 2024-12-02T23:09:12,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741831_1007 (size=1321) 2024-12-02T23:09:12,599 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:12,738 WARN [Thread-379 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:13,131 INFO [Thread-379 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:13,162 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T23:09:13,163 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:13,206 INFO [Thread-379 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:13,206 INFO [Thread-379 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:13,206 INFO [Thread-379 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T23:09:13,214 INFO [Thread-379 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f0a6d06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:13,215 INFO [Thread-379 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2da1d21b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:13,264 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:13,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:13,265 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T23:09:13,271 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:13,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a34a45f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:13,303 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c8f1c0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:13,476 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T23:09:13,478 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-02T23:09:13,478 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T23:09:13,480 INFO [Thread-379 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T23:09:13,600 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:14,354 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:09:14,497 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-02T23:09:14,500 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T23:09:14,523 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:15,064 INFO [Thread-379 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:15,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@75ef03f8{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-44047-hadoop-yarn-common-3_4_1_jar-_-any-1311911228981161514/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T23:09:15,112 INFO [Thread-379 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@6e13c6ec{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-45111-hadoop-yarn-common-3_4_1_jar-_-any-3377852490562490578/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T23:09:15,112 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ae84ec1{HTTP/1.1, (http/1.1)}{localhost:44047} 2024-12-02T23:09:15,112 INFO [Time-limited test {}] server.Server(415): Started @18641ms 2024-12-02T23:09:15,116 INFO [Thread-379 {}] server.AbstractConnector(333): Started ServerConnector@635471c{HTTP/1.1, (http/1.1)}{localhost:45111} 2024-12-02T23:09:15,116 INFO [Thread-379 {}] server.Server(415): Started @18644ms 2024-12-02T23:09:15,206 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:09:15,206 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-02T23:09:15,207 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T23:09:15,207 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T23:09:15,209 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-02T23:09:15,209 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-02T23:09:15,210 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:09:15,210 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-02T23:09:15,211 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T23:09:15,211 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-02T23:09:15,211 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:09:15,211 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-02T23:09:15,211 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T23:09:15,212 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T23:09:15,212 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T23:09:15,212 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T23:09:15,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741841_1017 (size=5) 2024-12-02T23:09:15,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741841_1017 (size=5) 2024-12-02T23:09:15,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741841_1017 (size=5) 2024-12-02T23:09:17,249 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-02T23:09:17,256 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:17,319 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T23:09:17,321 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:17,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:17,414 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:17,414 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T23:09:17,422 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:17,437 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e8faea6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:17,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1cb8d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:17,528 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T23:09:17,528 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T23:09:17,528 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T23:09:17,528 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T23:09:17,540 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:17,585 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:17,761 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:17,778 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c15c38e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-46387-hadoop-yarn-common-3_4_1_jar-_-any-12744663352753906574/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T23:09:17,779 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4847c338{HTTP/1.1, (http/1.1)}{localhost:46387} 2024-12-02T23:09:17,779 INFO [Time-limited test {}] server.Server(415): Started @21308ms 2024-12-02T23:09:18,034 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-02T23:09:18,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:18,057 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T23:09:18,058 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T23:09:18,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T23:09:18,071 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T23:09:18,071 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T23:09:18,072 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T23:09:18,073 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72e92192{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,AVAILABLE} 2024-12-02T23:09:18,074 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e5e644{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T23:09:18,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T23:09:18,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T23:09:18,139 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T23:09:18,139 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T23:09:18,150 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:18,156 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:18,336 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T23:09:18,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@64ab2e36{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/java.io.tmpdir/jetty-localhost-39539-hadoop-yarn-common-3_4_1_jar-_-any-9381321811309691600/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T23:09:18,353 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@31c68162{HTTP/1.1, (http/1.1)}{localhost:39539} 2024-12-02T23:09:18,353 INFO [Time-limited test {}] server.Server(415): Started @21882ms 2024-12-02T23:09:18,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-02T23:09:18,431 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:09:18,496 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=711, OpenFileDescriptor=759, MaxFileDescriptor=1048576, SystemLoadAverage=732, ProcessCount=11, AvailableMemoryMB=5985 2024-12-02T23:09:18,502 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=711 is superior to 500 2024-12-02T23:09:18,511 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T23:09:18,518 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:18,518 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@637d7af5 2024-12-02T23:09:18,519 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T23:09:18,529 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40066, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T23:09:18,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:09:18,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:18,537 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:09:18,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): 
Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-02T23:09:18,541 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:09:18,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T23:09:18,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741842_1018 (size=458) 2024-12-02T23:09:18,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741842_1018 (size=458) 2024-12-02T23:09:18,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741842_1018 (size=458) 2024-12-02T23:09:18,590 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 047f49362811e322054c938a41b68e9e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:18,594 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3de09e2f50b4bb1a4fe80eb412d983f6, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741843_1019 (size=83) 2024-12-02T23:09:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741843_1019 (size=83) 2024-12-02T23:09:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741843_1019 (size=83) 
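The descriptor logged for testtb-testExportFileSystemStateWithSplitRegion is a MOB table (IS_MOB => 'true' with MOB_THRESHOLD => '0', so effectively every cell in 'cf' is written to MOB files) pre-split into two regions at key '1'. A hedged sketch of building an equivalent descriptor through the public Admin API follows; it is illustrative, not the test's actual code.

```java
// Hedged sketch of a MOB-enabled, pre-split table matching the logged descriptor.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class MobSplitTableSketch {
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
            .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
            .setMaxVersions(1)
            .build())
        .build();
    // One split key '1' yields the two regions seen in this log:
    // ['', '1') and ['1', '').
    byte[][] splitKeys = { Bytes.toBytes("1") };
    admin.createTable(desc, splitKeys);
  }
}
```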
2024-12-02T23:09:18,639 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:18,640 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 047f49362811e322054c938a41b68e9e, disabling compactions & flushes 2024-12-02T23:09:18,640 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:18,640 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:18,640 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. after waiting 0 ms 2024-12-02T23:09:18,640 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:18,640 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:09:18,640 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 047f49362811e322054c938a41b68e9e: Waiting for close lock at 1733180958640Disabling compacts and flushes for region at 1733180958640Disabling writes for close at 1733180958640Writing region close event to WAL at 1733180958640Closed at 1733180958640 2024-12-02T23:09:18,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741844_1020 (size=83) 2024-12-02T23:09:18,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741844_1020 (size=83) 2024-12-02T23:09:18,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741844_1020 (size=83) 2024-12-02T23:09:18,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T23:09:18,665 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:18,665 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 3de09e2f50b4bb1a4fe80eb412d983f6, disabling compactions & flushes 2024-12-02T23:09:18,665 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:18,665 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:18,665 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. after waiting 0 ms 2024-12-02T23:09:18,665 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:18,666 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 
2024-12-02T23:09:18,666 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3de09e2f50b4bb1a4fe80eb412d983f6: Waiting for close lock at 1733180958665Disabling compacts and flushes for region at 1733180958665Disabling writes for close at 1733180958665Writing region close event to WAL at 1733180958666 (+1 ms)Closed at 1733180958666 2024-12-02T23:09:18,668 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:09:18,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733180958669"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180958669"}]},"ts":"1733180958669"} 2024-12-02T23:09:18,669 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733180958669"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180958669"}]},"ts":"1733180958669"} 2024-12-02T23:09:18,721 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:09:18,724 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:09:18,725 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180958724"}]},"ts":"1733180958724"} 2024-12-02T23:09:18,728 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T23:09:18,728 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:09:18,731 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:09:18,731 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:09:18,732 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:09:18,732 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:09:18,732 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:09:18,732 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:09:18,732 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:09:18,732 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:09:18,732 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:09:18,732 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:09:18,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, ASSIGN}] 2024-12-02T23:09:18,738 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, ASSIGN 2024-12-02T23:09:18,739 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, ASSIGN 2024-12-02T23:09:18,744 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:09:18,744 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:09:18,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T23:09:18,895 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
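For context, the create-table flow logged above (two regions split at key '1', column family 'cf', states CREATE_TABLE_ADD_TO_META -> CREATE_TABLE_ASSIGN_REGIONS, child ASSIGN procedures pid=8/9) is what the master runs when a client asks for a pre-split table. A minimal client-side sketch, assuming the standard HBase Admin API; the class name and main() wrapper are illustrative, only the table name, family and split key are taken from the log:

    // Illustrative sketch (assumed standard HBase client API): one column family "cf" and a
    // single split key "1" yield the two regions ('' -> '1' and '1' -> '') whose init/close
    // journals and ASSIGN procedures appear in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder
                  .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              // One split key => two initial regions, matching the two ASSIGN subprocedures.
              new byte[][] { Bytes.toBytes("1") });
        }
      }
    }

The blocking createTable call is also why the handler thread keeps logging "Checking to see if procedure is done pid=7": the client polls the master until CreateTableProcedure pid=7 completes.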
2024-12-02T23:09:18,895 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=3de09e2f50b4bb1a4fe80eb412d983f6, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:18,896 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=047f49362811e322054c938a41b68e9e, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:18,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, ASSIGN because future has completed 2024-12-02T23:09:18,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:18,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, ASSIGN because future has completed 2024-12-02T23:09:18,912 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:09:19,069 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:19,101 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:19,102 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 3de09e2f50b4bb1a4fe80eb412d983f6, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:09:19,102 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. service=AccessControlService 2024-12-02T23:09:19,103 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:09:19,103 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,103 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:19,103 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,103 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,111 INFO [StoreOpener-3de09e2f50b4bb1a4fe80eb412d983f6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,130 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41421, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:19,139 INFO [StoreOpener-3de09e2f50b4bb1a4fe80eb412d983f6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3de09e2f50b4bb1a4fe80eb412d983f6 columnFamilyName cf 2024-12-02T23:09:19,147 DEBUG [StoreOpener-3de09e2f50b4bb1a4fe80eb412d983f6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:19,148 INFO [StoreOpener-3de09e2f50b4bb1a4fe80eb412d983f6-1 {}] regionserver.HStore(327): Store=3de09e2f50b4bb1a4fe80eb412d983f6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:19,152 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,153 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6 
2024-12-02T23:09:19,154 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:19,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 047f49362811e322054c938a41b68e9e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:09:19,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. service=AccessControlService 2024-12-02T23:09:19,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,156 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:09:19,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:19,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,160 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,165 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/recovered.edits/1.seqid, newMaxSeqId=1, 
maxSeqId=-1 2024-12-02T23:09:19,166 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 3de09e2f50b4bb1a4fe80eb412d983f6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72846943, jitterRate=0.08550404012203217}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:19,167 INFO [StoreOpener-047f49362811e322054c938a41b68e9e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,167 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:19,168 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 3de09e2f50b4bb1a4fe80eb412d983f6: Running coprocessor pre-open hook at 1733180959104Writing region info on filesystem at 1733180959104Initializing all the Stores at 1733180959106 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180959106Cleaning up temporary data from old regions at 1733180959156 (+50 ms)Running coprocessor post-open hooks at 1733180959167 (+11 ms)Region opened successfully at 1733180959168 (+1 ms) 2024-12-02T23:09:19,172 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6., pid=10, masterSystemTime=1733180959068 2024-12-02T23:09:19,172 INFO [StoreOpener-047f49362811e322054c938a41b68e9e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 047f49362811e322054c938a41b68e9e columnFamilyName cf 2024-12-02T23:09:19,174 DEBUG [StoreOpener-047f49362811e322054c938a41b68e9e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:19,175 INFO [StoreOpener-047f49362811e322054c938a41b68e9e-1 {}] regionserver.HStore(327): Store=047f49362811e322054c938a41b68e9e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:19,176 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,178 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,179 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=3de09e2f50b4bb1a4fe80eb412d983f6, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:19,179 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,180 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,180 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,182 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:19,183 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 
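At this point one region (pid=10) has just been opened; the second (pid=11) completes a few lines below, after which the table is fully online and the later "Found 2 regions for table" check succeeds. A small sketch of that kind of client-side verification, assuming the standard RegionLocator API (class name and main() are illustrative):

    // Illustrative sketch (assumed standard HBase client API): enumerate the online regions of
    // the newly created table, which should be the two regions opened in the log above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListTableRegions {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Prints the encoded region name plus the region server hosting it.
            System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
          }
        }
      }
    }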
2024-12-02T23:09:19,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:19,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T23:09:19,191 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T23:09:19,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828 in 279 msec 2024-12-02T23:09:19,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, ASSIGN in 465 msec 2024-12-02T23:09:19,207 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:09:19,208 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 047f49362811e322054c938a41b68e9e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69492457, jitterRate=0.035518303513526917}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:19,208 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,208 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 047f49362811e322054c938a41b68e9e: Running coprocessor pre-open hook at 1733180959157Writing region info on filesystem at 1733180959157Initializing all the Stores at 1733180959165 (+8 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180959165Cleaning up temporary data from old regions at 1733180959180 (+15 ms)Running coprocessor post-open hooks at 1733180959208 (+28 ms)Region opened successfully at 1733180959208 2024-12-02T23:09:19,213 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e., pid=11, masterSystemTime=1733180959068 2024-12-02T23:09:19,216 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:19,217 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:19,218 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=047f49362811e322054c938a41b68e9e, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:09:19,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:09:19,229 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=9e5ab3b623d7,38049,1733180945779, table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T23:09:19,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-02T23:09:19,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779 in 318 msec 2024-12-02T23:09:19,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T23:09:19,260 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, ASSIGN in 523 msec 2024-12-02T23:09:19,262 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:09:19,262 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180959262"}]},"ts":"1733180959262"} 2024-12-02T23:09:19,265 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T23:09:19,270 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:09:19,275 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion 
jenkins: RWXCA 2024-12-02T23:09:19,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:19,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33473, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,310 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:19,310 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:19,311 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54905, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:19,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,344 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60033, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,346 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:19,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:19,389 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:09:19,394 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:19,394 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:19,395 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:19,395 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:19,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 863 msec 2024-12-02T23:09:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T23:09:19,694 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:19,697 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:19,703 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:19,704 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): 
firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:19,705 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:09:19,707 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:19,721 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:19,728 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,755 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33768, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,765 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:19,782 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733180959782 (current time:1733180959782). 
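The master has just received a FLUSH-type snapshot request for emptySnaptb0-testExportFileSystemStateWithSplitRegion (ttl=0; the next lines show the creation time and VERSION=2 being defaulted server-side). A minimal sketch of the client call that issues such a request, assuming the standard Admin API; only the snapshot and table names come from the log:

    // Illustrative sketch (assumed standard HBase client API): request a FLUSH snapshot.
    // A FLUSH snapshot flushes in-memory data before capturing store-file references; the
    // table is still empty here, so the per-region flush journals below are effectively no-ops.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }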
2024-12-02T23:09:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:09:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T23:09:19,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:09:19,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16bf9b67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:19,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:19,791 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:19,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:19,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:19,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ec60c85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:19,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:19,797 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:19,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dbeefbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:19,811 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:19,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49360, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:09:19,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:19,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,830 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
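The call stack above is the master closing the short-lived connection it opened in SnapshotDescriptionUtils.isSecurityAvailable while validating the snapshot request; immediately afterwards it reads the table ACL ("jenkins: RWXCA") so it can be copied into the snapshot description. A sketch of the equivalent client-side ACL lookup, assuming the AccessControlClient API (the master itself goes through PermissionStorage internally; class name and main() are illustrative):

    // Illustrative sketch (assumed HBase AccessControlClient API): read back the table ACL
    // entry that the log shows as "jenkins: RWXCA".
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          for (UserPermission p : AccessControlClient.getUserPermissions(conn,
              "testtb-testExportFileSystemStateWithSplitRegion")) {
            System.out.println(p);  // expected to include the jenkins RWXCA grant
          }
        }
      }
    }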
2024-12-02T23:09:19,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75d60885, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:19,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:19,834 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:19,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:19,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:19,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79ec31f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:19,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:19,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,837 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:19,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e807bb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:19,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:19,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:19,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49372, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-02T23:09:19,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:19,849 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:19,851 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33774, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:19,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:09:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T23:09:19,855 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:09:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:09:19,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:19,881 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:09:19,889 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:09:19,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T23:09:19,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T23:09:19,913 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:09:19,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741845_1021 (size=215) 2024-12-02T23:09:19,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741845_1021 (size=215) 2024-12-02T23:09:19,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741845_1021 (size=215) 2024-12-02T23:09:19,982 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:09:19,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6}] 2024-12-02T23:09:19,995 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:19,995 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:20,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T23:09:20,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-02T23:09:20,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-02T23:09:20,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:20,155 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:20,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 3de09e2f50b4bb1a4fe80eb412d983f6: 2024-12-02T23:09:20,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 047f49362811e322054c938a41b68e9e: 2024-12-02T23:09:20,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T23:09:20,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-02T23:09:20,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:20,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:20,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:09:20,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:09:20,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741847_1023 (size=86) 2024-12-02T23:09:20,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741847_1023 (size=86) 2024-12-02T23:09:20,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741847_1023 (size=86) 2024-12-02T23:09:20,211 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 
2024-12-02T23:09:20,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T23:09:20,213 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-02T23:09:20,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741846_1022 (size=86) 2024-12-02T23:09:20,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741846_1022 (size=86) 2024-12-02T23:09:20,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-02T23:09:20,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:20,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741846_1022 (size=86) 2024-12-02T23:09:20,217 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:20,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:09:20,217 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-02T23:09:20,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-02T23:09:20,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:20,220 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:20,222 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 in 235 msec 2024-12-02T23:09:20,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T23:09:20,226 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e in 238 msec 2024-12-02T23:09:20,226 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:09:20,229 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:09:20,235 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:09:20,235 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:09:20,236 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:20,237 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:09:20,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741848_1024 (size=78) 2024-12-02T23:09:20,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741848_1024 (size=78) 2024-12-02T23:09:20,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741848_1024 (size=78) 2024-12-02T23:09:20,269 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:09:20,269 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,273 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741849_1025 (size=713) 2024-12-02T23:09:20,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741849_1025 (size=713) 2024-12-02T23:09:20,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741849_1025 (size=713) 2024-12-02T23:09:20,325 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:09:20,348 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:09:20,350 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,359 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:09:20,359 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T23:09:20,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 493 msec 2024-12-02T23:09:20,487 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-02T23:09:20,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T23:09:20,524 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:20,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:09:20,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:09:20,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:20,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:20,585 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:09:20,585 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:09:20,588 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:20,595 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:20,604 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:20,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:20,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733180960608 (current time:1733180960608). 2024-12-02T23:09:20,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:09:20,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T23:09:20,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:09:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fdb731a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:20,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:20,610 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:20,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:20,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:20,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7314bd1f, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:20,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:20,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,613 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40142, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:20,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14172682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:20,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:20,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:20,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:20,618 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:09:20,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:20,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,618 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:09:20,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@411d9425, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:20,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:20,620 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:20,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:20,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:20,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bdbd3b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:20,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:20,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,622 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:20,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa8e61c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:20,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:20,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:20,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:20,626 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:20,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:20,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:20,630 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:20,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:09:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:20,632 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:09:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T23:09:20,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:09:20,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:20,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T23:09:20,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T23:09:20,638 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:09:20,640 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:09:20,644 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:09:20,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T23:09:20,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741850_1026 (size=210) 2024-12-02T23:09:20,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741850_1026 (size=210) 2024-12-02T23:09:20,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741850_1026 (size=210) 2024-12-02T23:09:20,787 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:09:20,787 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6}] 2024-12-02T23:09:20,792 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:20,792 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:20,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-02T23:09:20,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:09:20,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T23:09:20,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-02T23:09:20,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 047f49362811e322054c938a41b68e9e 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-02T23:09:20,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:09:20,957 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 3de09e2f50b4bb1a4fe80eb412d983f6 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-02T23:09:21,050 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e is 71, key is 010d8d9f80cc1d3c7b2a7dcd37240864/cf:q/1733180960559/Put/seqid=0 2024-12-02T23:09:21,055 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 is 71, key is 1562256a7598ba5509399a03bfb25eb1/cf:q/1733180960570/Put/seqid=0 2024-12-02T23:09:21,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741851_1027 (size=5032) 2024-12-02T23:09:21,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741851_1027 (size=5032) 2024-12-02T23:09:21,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741851_1027 (size=5032) 2024-12-02T23:09:21,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:21,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741852_1028 (size=8241) 2024-12-02T23:09:21,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741852_1028 (size=8241) 2024-12-02T23:09:21,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741852_1028 (size=8241) 2024-12-02T23:09:21,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:21,215 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e 2024-12-02T23:09:21,216 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:21,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/.tmp/cf/53ed2530e40342189507eac752e5c90c, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=047f49362811e322054c938a41b68e9e] 2024-12-02T23:09:21,225 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/.tmp/cf/b7349dac625a4ff0927263c7fbaf6c64, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=3de09e2f50b4bb1a4fe80eb412d983f6] 2024-12-02T23:09:21,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/.tmp/cf/53ed2530e40342189507eac752e5c90c is 224, key is 05f9526234c9e3d186dc87e79a635e18e/cf:q/1733180960559/Put/seqid=0 2024-12-02T23:09:21,245 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/.tmp/cf/b7349dac625a4ff0927263c7fbaf6c64 is 224, key is 1f9e15fc8cb2b3bfff1ed66a65686bdbe/cf:q/1733180960570/Put/seqid=0 2024-12-02T23:09:21,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741853_1029 (size=5754) 2024-12-02T23:09:21,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741853_1029 (size=5754) 2024-12-02T23:09:21,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741853_1029 (size=5754) 2024-12-02T23:09:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T23:09:21,267 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/.tmp/cf/53ed2530e40342189507eac752e5c90c 2024-12-02T23:09:21,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/.tmp/cf/53ed2530e40342189507eac752e5c90c as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c 2024-12-02T23:09:21,296 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c, entries=2, sequenceid=6, filesize=5.6 K 2024-12-02T23:09:21,307 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 047f49362811e322054c938a41b68e9e in 351ms, sequenceid=6, compaction requested=false 2024-12-02T23:09:21,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 
047f49362811e322054c938a41b68e9e: 2024-12-02T23:09:21,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T23:09:21,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:21,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c] hfiles 2024-12-02T23:09:21,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741854_1030 (size=15937) 2024-12-02T23:09:21,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741854_1030 (size=15937) 2024-12-02T23:09:21,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741854_1030 (size=15937) 2024-12-02T23:09:21,344 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/.tmp/cf/b7349dac625a4ff0927263c7fbaf6c64 2024-12-02T23:09:21,369 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/.tmp/cf/b7349dac625a4ff0927263c7fbaf6c64 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64 2024-12-02T23:09:21,379 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64, entries=48, sequenceid=6, filesize=15.6 K 2024-12-02T23:09:21,382 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 3de09e2f50b4bb1a4fe80eb412d983f6 in 426ms, sequenceid=6, compaction requested=false 2024-12-02T23:09:21,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 3de09e2f50b4bb1a4fe80eb412d983f6: 2024-12-02T23:09:21,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T23:09:21,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:21,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64] hfiles 2024-12-02T23:09:21,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741855_1031 (size=125) 2024-12-02T23:09:21,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741855_1031 (size=125) 2024-12-02T23:09:21,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:09:21,397 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-02T23:09:21,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-02T23:09:21,398 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:21,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741855_1031 (size=125) 2024-12-02T23:09:21,399 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e 2024-12-02T23:09:21,403 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 047f49362811e322054c938a41b68e9e in 614 msec 2024-12-02T23:09:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741856_1032 (size=125) 2024-12-02T23:09:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741856_1032 (size=125) 2024-12-02T23:09:21,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741856_1032 (size=125) 2024-12-02T23:09:21,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 
2024-12-02T23:09:21,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-02T23:09:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-02T23:09:21,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:21,430 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:21,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-12-02T23:09:21,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6 in 647 msec 2024-12-02T23:09:21,439 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:09:21,440 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:09:21,443 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:09:21,443 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:09:21,443 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:21,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e] hfiles 2024-12-02T23:09:21,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:09:21,445 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e 2024-12-02T23:09:21,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741857_1033 (size=309) 2024-12-02T23:09:21,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741857_1033 (size=309) 2024-12-02T23:09:21,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741857_1033 (size=309) 2024-12-02T23:09:21,477 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:09:21,477 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,479 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741858_1034 (size=1023) 2024-12-02T23:09:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39833 is added to blk_1073741858_1034 (size=1023) 2024-12-02T23:09:21,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741858_1034 (size=1023) 2024-12-02T23:09:21,501 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:09:21,520 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:09:21,521 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:09:21,525 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T23:09:21,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 893 msec 2024-12-02T23:09:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T23:09:21,773 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:21,805 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:21,806 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:21,808 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:09:21,810 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:21,810 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42866, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:21,811 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T23:09:21,811 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T23:09:21,812 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:09:21,812 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T23:09:21,819 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:09:21,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:21,823 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:09:21,823 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:21,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-02T23:09:21,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T23:09:21,829 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:09:21,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741859_1035 (size=390) 2024-12-02T23:09:21,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741859_1035 (size=390) 2024-12-02T23:09:21,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741859_1035 (size=390) 2024-12-02T23:09:21,919 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 62aeb8e89e443c08bcf2c307416afdb9, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:21,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T23:09:21,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741860_1036 (size=75) 2024-12-02T23:09:21,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741860_1036 (size=75) 2024-12-02T23:09:21,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741860_1036 (size=75) 2024-12-02T23:09:21,969 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:21,969 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 62aeb8e89e443c08bcf2c307416afdb9, disabling compactions & flushes 2024-12-02T23:09:21,969 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:21,969 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:21,969 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. after waiting 0 ms 2024-12-02T23:09:21,969 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:21,970 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 
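Earlier in this section the pid=15 SnapshotProcedure stepped through SNAPSHOT_CONSOLIDATE_SNAPSHOT, SNAPSHOT_VERIFIER_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT, and the client then logged "Operation: SNAPSHOT ... completed" for snaptb0-testExportFileSystemStateWithSplitRegion. The test harness drives this itself; the snippet below is only a minimal sketch of requesting the same kind of FLUSH snapshot through the synchronous Admin API, with the connection setup assumed rather than taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Take a snapshot of the online table, analogous to SnapshotProcedure pid=15 above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
    }
  }
}
```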
2024-12-02T23:09:21,970 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 62aeb8e89e443c08bcf2c307416afdb9: Waiting for close lock at 1733180961969Disabling compacts and flushes for region at 1733180961969Disabling writes for close at 1733180961969Writing region close event to WAL at 1733180961970 (+1 ms)Closed at 1733180961970 2024-12-02T23:09:21,973 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:09:21,975 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733180961973"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180961973"}]},"ts":"1733180961973"} 2024-12-02T23:09:21,980 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T23:09:21,982 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:09:21,983 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180961983"}]},"ts":"1733180961983"} 2024-12-02T23:09:21,995 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T23:09:21,996 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:09:21,997 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:09:21,997 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:09:21,997 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:09:21,997 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:09:21,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, ASSIGN}] 2024-12-02T23:09:22,000 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, ASSIGN 2024-12-02T23:09:22,002 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:09:22,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T23:09:22,152 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T23:09:22,153 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=62aeb8e89e443c08bcf2c307416afdb9, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:22,159 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, ASSIGN because future has completed 2024-12-02T23:09:22,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:22,343 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:22,343 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 62aeb8e89e443c08bcf2c307416afdb9, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:09:22,344 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. service=AccessControlService 2024-12-02T23:09:22,344 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
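The create request logged above prints the table descriptor the master used ({NAME => 'cf', VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', ...}) before CreateTableProcedure pid=18 writes the FS layout, adds the region to hbase:meta and assigns it. As a rough illustration only, an equivalent request could be issued through the Admin API as sketched below; several of the attributes set here merely restate HBase defaults, and the connection boilerplate is assumed, not taken from the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirror the logged descriptor: family 'cf', VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build());  // drives a CreateTableProcedure like pid=18 above
    }
  }
}
```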
2024-12-02T23:09:22,345 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,345 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:22,345 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,345 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,350 INFO [StoreOpener-62aeb8e89e443c08bcf2c307416afdb9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,353 INFO [StoreOpener-62aeb8e89e443c08bcf2c307416afdb9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 62aeb8e89e443c08bcf2c307416afdb9 columnFamilyName cf 2024-12-02T23:09:22,353 DEBUG [StoreOpener-62aeb8e89e443c08bcf2c307416afdb9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:22,355 INFO [StoreOpener-62aeb8e89e443c08bcf2c307416afdb9-1 {}] regionserver.HStore(327): Store=62aeb8e89e443c08bcf2c307416afdb9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:22,355 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,356 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,357 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,358 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,358 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,361 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,368 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:09:22,373 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 62aeb8e89e443c08bcf2c307416afdb9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63894566, jitterRate=-0.047896772623062134}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:22,373 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:22,378 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 62aeb8e89e443c08bcf2c307416afdb9: Running coprocessor pre-open hook at 1733180962345Writing region info on filesystem at 1733180962345Initializing all the Stores at 1733180962349 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180962349Cleaning up temporary data from old regions at 1733180962358 (+9 ms)Running coprocessor post-open hooks at 1733180962373 (+15 ms)Region opened successfully at 1733180962378 (+5 ms) 2024-12-02T23:09:22,382 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., pid=20, masterSystemTime=1733180962337 2024-12-02T23:09:22,393 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=62aeb8e89e443c08bcf2c307416afdb9, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:22,397 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 
2024-12-02T23:09:22,398 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:22,406 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:22,411 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=9e5ab3b623d7,41311,1733180945828, table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T23:09:22,420 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-02T23:09:22,421 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828 in 248 msec 2024-12-02T23:09:22,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-02T23:09:22,424 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, ASSIGN in 423 msec 2024-12-02T23:09:22,426 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:09:22,426 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733180962426"}]},"ts":"1733180962426"} 2024-12-02T23:09:22,429 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T23:09:22,440 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:09:22,440 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-02T23:09:22,446 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T23:09:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:22,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:09:22,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T23:09:22,453 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,455 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,457 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:09:22,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 634 msec 2024-12-02T23:09:22,962 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T23:09:22,963 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:22,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:22,967 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T23:09:24,613 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:09:25,205 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:25,205 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T23:09:25,213 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:25,213 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T23:09:26,500 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-02T23:09:26,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T23:09:26,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T23:09:26,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741861_1037 (size=134217728) 2024-12-02T23:09:29,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T23:09:29,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T23:09:29,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741862_1038 (size=134217728) 2024-12-02T23:09:29,926 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733180962972/Put/seqid=0 
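The HFileWriterImpl record above shows the test staging a large HFile at .../output/cf/test_file whose cells have the shape 1\x00\x00\x00/cf:q. The snippet below is a rough sketch, not the test's own helper, of writing a much smaller file with the same key shape through the HFile writer factory; the local output path and the tiny values are assumptions made for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileContext;
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteHFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path out = new Path("/tmp/output/cf/test_file");  // hypothetical location; layout is <dir>/<family>/<file>
    HFileContext ctx = new HFileContextBuilder().withBlockSize(64 * 1024).build();
    try (HFile.Writer writer = HFile.getWriterFactoryNoCache(conf)
        .withPath(fs, out)
        .withFileContext(ctx)
        .create()) {
      // Rows 1\x00\x00\x00 ... 9\x00\x00\x00 in family 'cf', qualifier 'q', appended in key order.
      for (byte row = '1'; row <= '9'; row++) {
        byte[] key = new byte[] { row, 0, 0, 0 };
        writer.append(new KeyValue(key, Bytes.toBytes("cf"), Bytes.toBytes("q"),
            System.currentTimeMillis(), Bytes.toBytes("v")));
      }
    }
  }
}
```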
2024-12-02T23:09:29,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T23:09:29,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T23:09:29,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741863_1039 (size=51979256) 2024-12-02T23:09:29,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3973e750, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:29,995 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:29,996 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:30,007 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:30,007 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:30,007 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:30,008 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3954b311, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:30,008 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:30,009 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:30,025 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:30,033 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:30,037 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f4861ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:30,037 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:30,044 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:30,044 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
2024-12-02T23:09:30,066 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57960, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:30,084 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:33369/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-02T23:09:30,085 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T23:09:30,086 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 9e5ab3b623d7,41513,1733180944669 2024-12-02T23:09:30,086 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@43970539 2024-12-02T23:09:30,087 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T23:09:30,116 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41714, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T23:09:30,123 WARN [IPC Server handler 2 on default port 33369 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T23:09:30,131 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:30,135 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:30,137 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:30,189 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:30,215 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:33369/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-02T23:09:30,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-02T23:09:30,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.3:54905 deadline: 1733181030245, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-02T23:09:30,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T23:09:30,255 WARN [IPC Server handler 2 on default port 33369 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T23:09:30,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:33369/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file for inclusion in 62aeb8e89e443c08bcf2c307416afdb9/cf 2024-12-02T23:09:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-02T23:09:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T23:09:30,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:33369/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-02T23:09:30,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(2603): Flush status journal for 62aeb8e89e443c08bcf2c307416afdb9: 2024-12-02T23:09:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:33369/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/output/cf/test_file to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/staging/jenkins__testExportFileSystemStateWithSplitRegion__g2rcg1mfkjd289pi171hj092nck7roa7btq73a1rlh12ge6n6n7bakebb5k926di/cf/test_file 2024-12-02T23:09:30,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/staging/jenkins__testExportFileSystemStateWithSplitRegion__g2rcg1mfkjd289pi171hj092nck7roa7btq73a1rlh12ge6n6n7bakebb5k926di/cf/test_file as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ 2024-12-02T23:09:30,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/staging/jenkins__testExportFileSystemStateWithSplitRegion__g2rcg1mfkjd289pi171hj092nck7roa7btq73a1rlh12ge6n6n7bakebb5k926di/cf/test_file into 62aeb8e89e443c08bcf2c307416afdb9/cf as 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ - updating store file list. 2024-12-02T23:09:30,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 62b48487498549db84901af7bfe771b8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T23:09:30,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ into 62aeb8e89e443c08bcf2c307416afdb9/cf 2024-12-02T23:09:30,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/staging/jenkins__testExportFileSystemStateWithSplitRegion__g2rcg1mfkjd289pi171hj092nck7roa7btq73a1rlh12ge6n6n7bakebb5k926di/cf/test_file into 62aeb8e89e443c08bcf2c307416afdb9/cf (new location: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_) 2024-12-02T23:09:30,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/staging/jenkins__testExportFileSystemStateWithSplitRegion__g2rcg1mfkjd289pi171hj092nck7roa7btq73a1rlh12ge6n6n7bakebb5k926di/cf/test_file 2024-12-02T23:09:30,417 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T23:09:30,417 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:09:30,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:30,418 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:30,418 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:09:30,419 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:30,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 336 connection: 172.17.0.3:41232 deadline: 1733181030422 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.3:41232 2024-12-02T23:09:30,424 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=9e5ab3b623d7:41311 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T23:09:30,425 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T23:09:30,425 DEBUG [RPCClient-NioEventLoopGroup-6-14 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2 from cache 2024-12-02T23:09:30,432 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:30,444 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.3 split testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 
2024-12-02T23:09:30,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:30,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=62aeb8e89e443c08bcf2c307416afdb9, daughterA=3e0e18998b7a6d2647649ac0e22e86e1, daughterB=05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:30,461 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=62aeb8e89e443c08bcf2c307416afdb9, daughterA=3e0e18998b7a6d2647649ac0e22e86e1, daughterB=05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:30,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:30,471 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, UNASSIGN}] 2024-12-02T23:09:30,480 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, UNASSIGN 2024-12-02T23:09:30,483 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=62aeb8e89e443c08bcf2c307416afdb9, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:30,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, UNASSIGN because future has completed 2024-12-02T23:09:30,490 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T23:09:30,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:30,534 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=9e5ab3b623d7:39847 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-12-02T23:09:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:30,659 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:30,666 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T23:09:30,667 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 62aeb8e89e443c08bcf2c307416afdb9, disabling compactions & flushes 2024-12-02T23:09:30,667 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:30,667 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:30,668 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. after waiting 0 ms 2024-12-02T23:09:30,668 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 
2024-12-02T23:09:30,698 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-02T23:09:30,703 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:09:30,703 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9. 2024-12-02T23:09:30,703 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 62aeb8e89e443c08bcf2c307416afdb9: Waiting for close lock at 1733180970667Running coprocessor pre-close hooks at 1733180970667Disabling compacts and flushes for region at 1733180970667Disabling writes for close at 1733180970668 (+1 ms)Writing region close event to WAL at 1733180970683 (+15 ms)Running coprocessor post-close hooks at 1733180970699 (+16 ms)Closed at 1733180970703 (+4 ms) 2024-12-02T23:09:30,709 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:30,713 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=62aeb8e89e443c08bcf2c307416afdb9, regionState=CLOSED 2024-12-02T23:09:30,717 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:30,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-02T23:09:30,726 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 62aeb8e89e443c08bcf2c307416afdb9, server=9e5ab3b623d7,41311,1733180945828 in 229 msec 2024-12-02T23:09:30,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-02T23:09:30,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=62aeb8e89e443c08bcf2c307416afdb9, UNASSIGN in 255 msec 2024-12-02T23:09:30,743 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:30,746 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=62aeb8e89e443c08bcf2c307416afdb9, threads=1 2024-12-02T23:09:30,749 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ for region: 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:30,760 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 62b48487498549db84901af7bfe771b8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T23:09:30,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:30,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741864_1040 (size=21) 2024-12-02T23:09:30,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741864_1040 (size=21) 2024-12-02T23:09:30,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741864_1040 (size=21) 2024-12-02T23:09:30,852 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 62b48487498549db84901af7bfe771b8_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T23:09:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741865_1041 (size=21) 2024-12-02T23:09:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741865_1041 (size=21) 2024-12-02T23:09:30,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741865_1041 (size=21) 2024-12-02T23:09:30,874 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ for region: 62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:09:30,876 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 62aeb8e89e443c08bcf2c307416afdb9 Daughter A: [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9] storefiles, Daughter B: [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9] storefiles. 
2024-12-02T23:09:31,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741866_1042 (size=76) 2024-12-02T23:09:31,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741866_1042 (size=76) 2024-12-02T23:09:31,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741866_1042 (size=76) 2024-12-02T23:09:31,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:31,423 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:31,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741867_1043 (size=76) 2024-12-02T23:09:31,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741867_1043 (size=76) 2024-12-02T23:09:31,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741867_1043 (size=76) 2024-12-02T23:09:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:31,851 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:31,865 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T23:09:31,868 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T23:09:31,871 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733180971870"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733180971870"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733180971870"}]},"ts":"1733180971870"} 2024-12-02T23:09:31,871 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733180971870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180971870"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733180971870"}]},"ts":"1733180971870"} 2024-12-02T23:09:31,871 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733180971870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733180971870"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733180971870"}]},"ts":"1733180971870"} 2024-12-02T23:09:31,884 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, ASSIGN}] 2024-12-02T23:09:31,885 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, ASSIGN 2024-12-02T23:09:31,885 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, ASSIGN 2024-12-02T23:09:31,886 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, ASSIGN; state=SPLITTING_NEW, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:09:31,886 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, ASSIGN; state=SPLITTING_NEW, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:09:32,037 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:09:32,037 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=3e0e18998b7a6d2647649ac0e22e86e1, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:32,037 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=05ddcad59d270acecb26f8927fa92fb7, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:32,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, ASSIGN because future has completed 2024-12-02T23:09:32,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:32,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, ASSIGN because future has completed 2024-12-02T23:09:32,052 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:09:32,207 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:09:32,208 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 3e0e18998b7a6d2647649ac0e22e86e1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.', STARTKEY => '', ENDKEY => '5'} 2024-12-02T23:09:32,208 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. service=AccessControlService 2024-12-02T23:09:32,208 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:09:32,209 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,209 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:32,209 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,209 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,211 INFO [StoreOpener-3e0e18998b7a6d2647649ac0e22e86e1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,212 INFO [StoreOpener-3e0e18998b7a6d2647649ac0e22e86e1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e0e18998b7a6d2647649ac0e22e86e1 columnFamilyName cf 2024-12-02T23:09:32,212 DEBUG [StoreOpener-3e0e18998b7a6d2647649ac0e22e86e1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:32,226 DEBUG [StoreFileOpener-3e0e18998b7a6d2647649ac0e22e86e1-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9: NONE, but ROW specified in column family configuration 2024-12-02T23:09:32,245 DEBUG [StoreOpener-3e0e18998b7a6d2647649ac0e22e86e1-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_-bottom 2024-12-02T23:09:32,246 INFO [StoreOpener-3e0e18998b7a6d2647649ac0e22e86e1-1 {}] regionserver.HStore(327): Store=3e0e18998b7a6d2647649ac0e22e86e1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:32,246 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,247 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,249 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,250 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,250 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,253 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,254 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 3e0e18998b7a6d2647649ac0e22e86e1; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63077709, jitterRate=-0.06006889045238495}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:32,254 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,256 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 3e0e18998b7a6d2647649ac0e22e86e1: Running coprocessor pre-open hook at 1733180972209Writing region info on filesystem at 1733180972209Initializing all the Stores at 1733180972210 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180972210Cleaning up temporary data from old regions at 1733180972250 (+40 ms)Running coprocessor post-open hooks at 1733180972254 (+4 ms)Region opened successfully at 1733180972256 (+2 ms) 2024-12-02T23:09:32,257 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1., pid=26, masterSystemTime=1733180972203 2024-12-02T23:09:32,257 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.,because compaction is disabled. 2024-12-02T23:09:32,341 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:09:32,342 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:09:32,342 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:09:32,343 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 05ddcad59d270acecb26f8927fa92fb7, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.', STARTKEY => '5', ENDKEY => ''} 2024-12-02T23:09:32,343 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. service=AccessControlService 2024-12-02T23:09:32,343 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:09:32,344 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,344 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:09:32,344 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=3e0e18998b7a6d2647649ac0e22e86e1, regionState=OPEN, openSeqNum=7, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:32,344 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,344 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,349 INFO [StoreOpener-05ddcad59d270acecb26f8927fa92fb7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,351 INFO [StoreOpener-05ddcad59d270acecb26f8927fa92fb7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 05ddcad59d270acecb26f8927fa92fb7 columnFamilyName cf 2024-12-02T23:09:32,351 DEBUG [StoreOpener-05ddcad59d270acecb26f8927fa92fb7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:32,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:32,369 DEBUG [StoreFileOpener-05ddcad59d270acecb26f8927fa92fb7-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9: NONE, but ROW specified in column family configuration 2024-12-02T23:09:32,372 DEBUG [StoreOpener-05ddcad59d270acecb26f8927fa92fb7-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_-top 2024-12-02T23:09:32,372 INFO [StoreOpener-05ddcad59d270acecb26f8927fa92fb7-1 {}] regionserver.HStore(327): Store=05ddcad59d270acecb26f8927fa92fb7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:09:32,372 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,374 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-12-02T23:09:32,376 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828 in 322 msec 2024-12-02T23:09:32,378 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,380 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, ASSIGN in 492 msec 2024-12-02T23:09:32,380 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,380 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,394 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,396 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 05ddcad59d270acecb26f8927fa92fb7; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60553833, jitterRate=-0.0976775735616684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:09:32,396 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 
05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,396 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 05ddcad59d270acecb26f8927fa92fb7: Running coprocessor pre-open hook at 1733180972344Writing region info on filesystem at 1733180972344Initializing all the Stores at 1733180972349 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733180972349Cleaning up temporary data from old regions at 1733180972380 (+31 ms)Running coprocessor post-open hooks at 1733180972396 (+16 ms)Region opened successfully at 1733180972396 2024-12-02T23:09:32,398 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7., pid=27, masterSystemTime=1733180972203 2024-12-02T23:09:32,398 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.,because compaction is disabled. 2024-12-02T23:09:32,405 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:09:32,406 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 
2024-12-02T23:09:32,408 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=05ddcad59d270acecb26f8927fa92fb7, regionState=OPEN, openSeqNum=7, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:09:32,415 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:09:32,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-12-02T23:09:32,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, server=9e5ab3b623d7,41311,1733180945828 in 366 msec 2024-12-02T23:09:32,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-02T23:09:32,431 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, ASSIGN in 539 msec 2024-12-02T23:09:32,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=62aeb8e89e443c08bcf2c307416afdb9, daughterA=3e0e18998b7a6d2647649ac0e22e86e1, daughterB=05ddcad59d270acecb26f8927fa92fb7 in 1.9750 sec 2024-12-02T23:09:32,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T23:09:32,618 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T23:09:32,618 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:32,632 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733180972632 (current time:1733180972632). 
2024-12-02T23:09:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:09:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T23:09:32,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:09:32,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@627202da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:32,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:32,636 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:32,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:32,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:32,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fab4c1f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:32,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:32,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,639 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34278, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:32,640 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@549fc887, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:32,644 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:32,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:32,646 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:32,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:09:32,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:32,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@686b7cc0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:09:32,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:09:32,650 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
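The "Call stack" dump above is not an error: SnapshotDescriptionUtils.isSecurityAvailable opens a short-lived connection, checks whether the hbase:acl table exists, and closes it again, and AsyncConnectionImpl logs its caller when it is closed. A rough equivalent of that check using only public client API (an assumption about what the private method does internally, based on the stack trace above):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class IsSecurityAvailable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // If hbase:acl exists, the AccessController is active and the snapshot
          // validation step will also copy table ACLs into the snapshot description.
          boolean securityAvailable = admin.tableExists(TableName.valueOf("hbase:acl"));
          System.out.println("ACL table present: " + securityAvailable);
        }
      }
    }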
2024-12-02T23:09:32,651 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:09:32,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:09:32,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:09:32,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a10a468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:09:32,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:09:32,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,654 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:09:32,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@613575f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:09:32,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:09:32,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:09:32,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:32,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37672, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:09:32,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:09:32,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:09:32,663 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40500, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-02T23:09:32,665 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:09:32,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:09:32,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:09:32,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T23:09:32,666 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
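Here the master reads the table's ACL entry (jenkins: RWXCA) so it can be written into the snapshot description. To see the same permissions from a client, a small sketch using AccessControlClient; it assumes the AccessController coprocessor is enabled, as it is in this mini-cluster:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ShowTableAcls {
      // AccessControlClient methods declare Throwable, hence the broad throws clause.
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Reads hbase:acl much like the master does before writing ACLs into the snapshot.
          List<UserPermission> perms =
              AccessControlClient.getUserPermissions(conn, "testExportFileSystemStateWithSplitRegion");
          perms.forEach(p -> System.out.println(p));  // expect an entry like "jenkins: RWXCA"
        }
      }
    }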
2024-12-02T23:09:32,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:09:32,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T23:09:32,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T23:09:32,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T23:09:32,671 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:09:32,672 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:09:32,676 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:09:32,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T23:09:32,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741868_1044 (size=197) 2024-12-02T23:09:32,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741868_1044 (size=197) 2024-12-02T23:09:32,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741868_1044 (size=197) 2024-12-02T23:09:32,791 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:09:32,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 05ddcad59d270acecb26f8927fa92fb7}] 
2024-12-02T23:09:32,793 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:32,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:32,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-02T23:09:32,947 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:09:32,947 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 05ddcad59d270acecb26f8927fa92fb7: 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 3e0e18998b7a6d2647649ac0e22e86e1: 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.' 
region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_-top] hfiles 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_-bottom] hfiles 2024-12-02T23:09:32,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T23:09:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741869_1045 (size=182) 2024-12-02T23:09:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741869_1045 (size=182) 2024-12-02T23:09:33,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 
2024-12-02T23:09:33,101 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-02T23:09:33,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741870_1046 (size=182) 2024-12-02T23:09:33,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741870_1046 (size=182) 2024-12-02T23:09:33,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741869_1045 (size=182) 2024-12-02T23:09:33,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-02T23:09:33,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:33,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741870_1046 (size=182) 2024-12-02T23:09:33,102 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:09:33,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 
2024-12-02T23:09:33,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-02T23:09:33,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-02T23:09:33,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:33,105 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:09:33,114 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1 in 315 msec 2024-12-02T23:09:33,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-12-02T23:09:33,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 05ddcad59d270acecb26f8927fa92fb7 in 319 msec 2024-12-02T23:09:33,128 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:09:33,130 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:09:33,131 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:09:33,131 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:09:33,136 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_] hfiles 2024-12-02T23:09:33,136 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ 2024-12-02T23:09:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741871_1047 (size=129) 2024-12-02T23:09:33,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741871_1047 (size=129) 2024-12-02T23:09:33,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741871_1047 (size=129) 2024-12-02T23:09:33,226 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 62aeb8e89e443c08bcf2c307416afdb9, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,228 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:09:33,230 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:09:33,231 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,234 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T23:09:33,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741872_1048 (size=891) 2024-12-02T23:09:33,301 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741872_1048 (size=891) 2024-12-02T23:09:33,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741872_1048 (size=891) 2024-12-02T23:09:33,306 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:09:33,321 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:09:33,322 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:09:33,326 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T23:09:33,329 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 659 msec 2024-12-02T23:09:33,582 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
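At this point the snapshot procedure (pid=28) has finished and the manifest has been moved out of .tmp into the final .hbase-snapshot directory. A quick way to confirm the snapshot is now visible to clients, sketched with the standard Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The completed snapshot should now be listed by name.
          for (SnapshotDescription sd : admin.listSnapshots()) {
            System.out.println(sd.getName());
          }
        }
      }
    }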
2024-12-02T23:09:33,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T23:09:33,803 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:09:33,804 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803 2024-12-02T23:09:33,804 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:33,868 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:09:33,868 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,876 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
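From here on the test drives org.apache.hadoop.hbase.snapshot.ExportSnapshot, copying the snapshot into the export-1733180973803 directory on the same HDFS. Outside a test you would normally launch the same tool from the shell; a rough Java equivalent via ToolRunner, with paths taken from the log and a mapper count that is only an assumption matching the single split reported further down:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Shell form: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //               -snapshot snapshot-testExportFileSystemStateWithSplitRegion \
        //               -copy-to hdfs://localhost:33369/.../export-test/export-1733180973803
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://localhost:33369/user/jenkins/test-data/"
                + "962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }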
2024-12-02T23:09:33,895 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:09:33,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741873_1049 (size=197) 2024-12-02T23:09:33,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741873_1049 (size=197) 2024-12-02T23:09:33,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741873_1049 (size=197) 2024-12-02T23:09:33,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741874_1050 (size=891) 2024-12-02T23:09:33,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741874_1050 (size=891) 2024-12-02T23:09:33,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741874_1050 (size=891) 2024-12-02T23:09:33,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:33,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:33,996 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,300 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-12277292457627919128.jar 2024-12-02T23:09:35,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,301 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,430 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-84777170064969074.jar 
2024-12-02T23:09:35,431 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,432 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,432 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:09:35,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:09:35,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:09:35,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:09:35,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:09:35,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:09:35,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:09:35,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:09:35,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:09:35,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:09:35,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:09:35,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:09:35,443 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:09:35,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:09:35,444 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:09:35,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:09:35,445 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:09:35,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:09:35,446 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:09:35,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741875_1051 (size=131440) 2024-12-02T23:09:35,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741875_1051 (size=131440) 2024-12-02T23:09:35,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741875_1051 (size=131440) 2024-12-02T23:09:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T23:09:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T23:09:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741876_1052 (size=4188619) 2024-12-02T23:09:35,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T23:09:35,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T23:09:35,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741877_1053 (size=1323991) 2024-12-02T23:09:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741878_1054 (size=903926) 2024-12-02T23:09:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741878_1054 (size=903926) 2024-12-02T23:09:35,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741878_1054 (size=903926) 2024-12-02T23:09:35,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T23:09:35,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T23:09:35,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741879_1055 (size=8360083) 2024-12-02T23:09:35,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741880_1056 (size=1877034) 2024-12-02T23:09:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741880_1056 (size=1877034) 2024-12-02T23:09:35,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741880_1056 (size=1877034) 2024-12-02T23:09:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741881_1057 (size=77835) 2024-12-02T23:09:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741881_1057 (size=77835) 2024-12-02T23:09:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741881_1057 (size=77835) 2024-12-02T23:09:35,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741882_1058 (size=443172) 2024-12-02T23:09:35,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741882_1058 (size=443172) 2024-12-02T23:09:35,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741882_1058 (size=443172) 2024-12-02T23:09:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741883_1059 (size=30949) 2024-12-02T23:09:36,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741883_1059 (size=30949) 2024-12-02T23:09:36,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741883_1059 (size=30949) 2024-12-02T23:09:36,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741884_1060 (size=1597213) 2024-12-02T23:09:36,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741884_1060 (size=1597213) 2024-12-02T23:09:36,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741884_1060 (size=1597213) 2024-12-02T23:09:36,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741885_1061 (size=6424743) 2024-12-02T23:09:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741885_1061 (size=6424743) 2024-12-02T23:09:36,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741885_1061 (size=6424743) 2024-12-02T23:09:36,244 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:09:36,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T23:09:36,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T23:09:36,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741886_1062 (size=4695811) 2024-12-02T23:09:36,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741887_1063 (size=232957) 2024-12-02T23:09:36,326 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741887_1063 (size=232957) 2024-12-02T23:09:36,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741887_1063 (size=232957) 2024-12-02T23:09:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741888_1064 (size=127628) 2024-12-02T23:09:36,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741888_1064 (size=127628) 2024-12-02T23:09:36,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741888_1064 (size=127628) 2024-12-02T23:09:36,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741889_1065 (size=20406) 2024-12-02T23:09:36,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741889_1065 (size=20406) 2024-12-02T23:09:36,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741889_1065 (size=20406) 2024-12-02T23:09:36,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T23:09:36,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T23:09:36,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741890_1066 (size=5175431) 2024-12-02T23:09:36,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741891_1067 (size=217634) 2024-12-02T23:09:36,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741891_1067 (size=217634) 2024-12-02T23:09:36,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741891_1067 (size=217634) 2024-12-02T23:09:36,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T23:09:36,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T23:09:36,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741892_1068 (size=1832290) 2024-12-02T23:09:36,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741893_1069 (size=322274) 2024-12-02T23:09:36,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741893_1069 (size=322274) 2024-12-02T23:09:36,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741893_1069 (size=322274) 2024-12-02T23:09:36,688 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741894_1070 (size=503880) 2024-12-02T23:09:36,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741894_1070 (size=503880) 2024-12-02T23:09:36,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741894_1070 (size=503880) 2024-12-02T23:09:36,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741895_1071 (size=29229) 2024-12-02T23:09:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741895_1071 (size=29229) 2024-12-02T23:09:36,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741895_1071 (size=29229) 2024-12-02T23:09:36,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741896_1072 (size=24096) 2024-12-02T23:09:36,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741896_1072 (size=24096) 2024-12-02T23:09:36,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741896_1072 (size=24096) 2024-12-02T23:09:36,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741897_1073 (size=111872) 2024-12-02T23:09:36,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741897_1073 (size=111872) 2024-12-02T23:09:36,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741897_1073 (size=111872) 2024-12-02T23:09:36,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741898_1074 (size=45609) 2024-12-02T23:09:36,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741898_1074 (size=45609) 2024-12-02T23:09:36,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741898_1074 (size=45609) 2024-12-02T23:09:36,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741899_1075 (size=136454) 2024-12-02T23:09:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741899_1075 (size=136454) 2024-12-02T23:09:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741899_1075 (size=136454) 2024-12-02T23:09:36,755 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
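The long run of "For class ..., using jar ..." lines, together with the addStoredBlock entries, is TableMapReduceUtil resolving every jar the export MapReduce job needs and uploading them to HDFS for the distributed cache; the JobResourceUploader warning only notes that the job itself has no user jar. A minimal sketch of the call that produces those lines in your own job setup (the job name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JarShippingExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-style-job");
        // Resolves the jar for each required class (HBase, ZooKeeper, protobuf, netty, ...)
        // and registers it with the job so YARN containers can load those classes.
        TableMapReduceUtil.addDependencyJars(job);
        // The resolved jars are conventionally recorded under the "tmpjars" key.
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }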
2024-12-02T23:09:36,761 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-02T23:09:36,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=62aeb8e89e443c08bcf2c307416afdb9-62b48487498549db84901af7bfe771b8_SeqId_4_. 2024-12-02T23:09:36,768 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=62aeb8e89e443c08bcf2c307416afdb9-62b48487498549db84901af7bfe771b8_SeqId_4_. 2024-12-02T23:09:36,769 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-02T23:09:36,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741900_1076 (size=244) 2024-12-02T23:09:36,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741900_1076 (size=244) 2024-12-02T23:09:36,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741900_1076 (size=244) 2024-12-02T23:09:36,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741901_1077 (size=17) 2024-12-02T23:09:36,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741901_1077 (size=17) 2024-12-02T23:09:36,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741901_1077 (size=17) 2024-12-02T23:09:36,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741902_1078 (size=304048) 2024-12-02T23:09:36,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741902_1078 (size=304048) 2024-12-02T23:09:36,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741902_1078 (size=304048) 2024-12-02T23:09:37,327 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:09:37,328 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:09:37,490 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0001_000001 (auth:SIMPLE) from 127.0.0.1:53684 2024-12-02T23:09:46,871 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0001_000001 (auth:SIMPLE) from 127.0.0.1:52910 2024-12-02T23:09:47,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741903_1079 (size=349746) 2024-12-02T23:09:47,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741903_1079 (size=349746) 2024-12-02T23:09:47,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741903_1079 (size=349746) 2024-12-02T23:09:49,565 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0001_000001 (auth:SIMPLE) from 127.0.0.1:49580 2024-12-02T23:09:51,601 INFO [master/9e5ab3b623d7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T23:09:51,601 INFO [master/9e5ab3b623d7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T23:10:03,582 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:10:04,103 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3de09e2f50b4bb1a4fe80eb412d983f6, had cached 0 bytes from a total of 15937 2024-12-02T23:10:04,156 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 047f49362811e322054c938a41b68e9e, had cached 0 bytes from a total of 5754 2024-12-02T23:10:09,614 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 047f49362811e322054c938a41b68e9e changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:10:09,615 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a8d1ca9c63bdfff14551a9b4d2eb111b changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:10:09,617 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3de09e2f50b4bb1a4fe80eb412d983f6 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:10:17,209 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3e0e18998b7a6d2647649ac0e22e86e1, had cached 0 bytes from a total of 320414712 2024-12-02T23:10:17,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 05ddcad59d270acecb26f8927fa92fb7, had cached 0 bytes from a total of 320414712 2024-12-02T23:10:33,582 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
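The AbstractLeafQueue warnings above indicate that yarn.scheduler.capacity.maximum-am-resource-percent is too small for the queue to admit even one ApplicationMaster, so the scheduler skips the check. A minimal sketch of raising the limit on the Configuration handed to the mini YARN/MR cluster; the 0.5 value and class name are illustrative assumptions, not taken from this test:

    import org.apache.hadoop.conf.Configuration;

    public class AmShareExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow ApplicationMasters to use up to 50% of the queue's resources.
        // The capacity-scheduler default (0.1) can be too small for the tiny
        // queues a MiniMRCluster creates, which triggers the warning above.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        // Pass 'conf' to whatever harness starts the YARN mini cluster.
      }
    }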
2024-12-02T23:10:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T23:10:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T23:10:36,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741904_1080 (size=134217728) 2024-12-02T23:10:49,104 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3de09e2f50b4bb1a4fe80eb412d983f6, had cached 0 bytes from a total of 15937 2024-12-02T23:10:49,157 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 047f49362811e322054c938a41b68e9e, had cached 0 bytes from a total of 5754 2024-12-02T23:11:02,209 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3e0e18998b7a6d2647649ac0e22e86e1, had cached 0 bytes from a total of 320414712 2024-12-02T23:11:02,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 05ddcad59d270acecb26f8927fa92fb7, had cached 0 bytes from a total of 320414712 2024-12-02T23:11:03,583 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:11:14,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T23:11:14,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T23:11:14,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741905_1081 (size=134217728) 2024-12-02T23:11:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T23:11:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T23:11:29,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741906_1082 (size=51979256) 2024-12-02T23:11:29,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741907_1083 (size=17520) 2024-12-02T23:11:29,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741907_1083 (size=17520) 2024-12-02T23:11:29,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741907_1083 (size=17520) 2024-12-02T23:11:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741908_1084 (size=482) 2024-12-02T23:11:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is 
added to blk_1073741908_1084 (size=482) 2024-12-02T23:11:29,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741908_1084 (size=482) 2024-12-02T23:11:29,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741909_1085 (size=17520) 2024-12-02T23:11:29,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741909_1085 (size=17520) 2024-12-02T23:11:29,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741909_1085 (size=17520) 2024-12-02T23:11:29,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000002/launch_container.sh] 2024-12-02T23:11:29,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000002/container_tokens] 2024-12-02T23:11:29,224 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000002/sysfs] 2024-12-02T23:11:29,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741910_1086 (size=349746) 2024-12-02T23:11:29,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741910_1086 (size=349746) 2024-12-02T23:11:29,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741910_1086 (size=349746) 2024-12-02T23:11:29,257 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0001_000001 (auth:SIMPLE) from 127.0.0.1:41284 2024-12-02T23:11:31,304 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:11:31,306 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
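The entries above show ExportSnapshot finalizing the export and verifying the exported snapshot. For reference, a sketch of how the same tool is typically driven outside the test harness, assuming ExportSnapshot is run as a Hadoop Tool (as its command-line entry point does); the -copy-to URI is a placeholder, while the snapshot name is the one from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the snapshot manifest plus the HFiles it references to another
        // filesystem; the target URI below is a placeholder.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }

The equivalent command-line form is typically hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>.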
2024-12-02T23:11:31,318 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,318 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:11:31,319 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:11:31,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T23:11:31,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T23:11:31,319 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,320 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T23:11:31,320 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733180973803/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T23:11:31,332 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T23:11:31,341 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181091341"}]},"ts":"1733181091341"} 2024-12-02T23:11:31,343 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
2024-12-02T23:11:31,344 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T23:11:31,345 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-02T23:11:31,350 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, UNASSIGN}] 2024-12-02T23:11:31,351 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, UNASSIGN 2024-12-02T23:11:31,351 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, UNASSIGN 2024-12-02T23:11:31,352 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=3e0e18998b7a6d2647649ac0e22e86e1, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:11:31,352 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=05ddcad59d270acecb26f8927fa92fb7, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:11:31,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, UNASSIGN because future has completed 2024-12-02T23:11:31,364 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:11:31,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:11:31,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, UNASSIGN because future has completed 2024-12-02T23:11:31,366 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:11:31,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828}] 
2024-12-02T23:11:31,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T23:11:31,518 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:11:31,518 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:11:31,518 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 3e0e18998b7a6d2647649ac0e22e86e1, disabling compactions & flushes 2024-12-02T23:11:31,518 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:11:31,518 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:11:31,518 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. after waiting 0 ms 2024-12-02T23:11:31,519 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 2024-12-02T23:11:31,524 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T23:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:11:31,525 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1. 
2024-12-02T23:11:31,525 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 3e0e18998b7a6d2647649ac0e22e86e1: Waiting for close lock at 1733181091518Running coprocessor pre-close hooks at 1733181091518Disabling compacts and flushes for region at 1733181091518Disabling writes for close at 1733181091518Writing region close event to WAL at 1733181091519 (+1 ms)Running coprocessor post-close hooks at 1733181091525 (+6 ms)Closed at 1733181091525 2024-12-02T23:11:31,527 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:11:31,527 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:11:31,527 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:11:31,527 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 05ddcad59d270acecb26f8927fa92fb7, disabling compactions & flushes 2024-12-02T23:11:31,527 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:11:31,527 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:11:31,527 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. after waiting 0 ms 2024-12-02T23:11:31,527 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 
2024-12-02T23:11:31,528 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=3e0e18998b7a6d2647649ac0e22e86e1, regionState=CLOSED 2024-12-02T23:11:31,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:11:31,532 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T23:11:31,532 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:11:31,533 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7. 2024-12-02T23:11:31,533 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 05ddcad59d270acecb26f8927fa92fb7: Waiting for close lock at 1733181091527Running coprocessor pre-close hooks at 1733181091527Disabling compacts and flushes for region at 1733181091527Disabling writes for close at 1733181091527Writing region close event to WAL at 1733181091528 (+1 ms)Running coprocessor post-close hooks at 1733181091532 (+4 ms)Closed at 1733181091533 (+1 ms) 2024-12-02T23:11:31,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-12-02T23:11:31,533 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 3e0e18998b7a6d2647649ac0e22e86e1, server=9e5ab3b623d7,41311,1733180945828 in 165 msec 2024-12-02T23:11:31,535 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:11:31,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3e0e18998b7a6d2647649ac0e22e86e1, UNASSIGN in 183 msec 2024-12-02T23:11:31,535 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=05ddcad59d270acecb26f8927fa92fb7, regionState=CLOSED 2024-12-02T23:11:31,537 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:11:31,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-12-02T23:11:31,541 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 05ddcad59d270acecb26f8927fa92fb7, 
server=9e5ab3b623d7,41311,1733180945828 in 174 msec 2024-12-02T23:11:31,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-02T23:11:31,544 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=05ddcad59d270acecb26f8927fa92fb7, UNASSIGN in 191 msec 2024-12-02T23:11:31,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-02T23:11:31,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 200 msec 2024-12-02T23:11:31,548 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181091548"}]},"ts":"1733181091548"} 2024-12-02T23:11:31,550 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T23:11:31,550 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T23:11:31,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 217 msec 2024-12-02T23:11:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T23:11:31,662 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:11:31,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,672 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,674 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,675 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42365, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:31,677 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47387, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-12-02T23:11:31,679 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,682 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,684 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T23:11:31,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,684 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:11:31,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:31,685 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:11:31,685 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T23:11:31,685 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:11:31,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:31,685 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T23:11:31,685 INFO [zk-permission-watcher-pool-0 {}] 
access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:11:31,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:31,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:31,685 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T23:11:31,686 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:11:31,686 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:11:31,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-02T23:11:31,687 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:31,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:31,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:31,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:31,689 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/recovered.edits] 2024-12-02T23:11:31,689 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf, FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/recovered.edits] 2024-12-02T23:11:31,689 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/recovered.edits] 2024-12-02T23:11:31,697 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:11:31,697 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/cf/62b48487498549db84901af7bfe771b8_SeqId_4_.62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:11:31,698 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/cf/62b48487498549db84901af7bfe771b8_SeqId_4_ 2024-12-02T23:11:31,702 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/recovered.edits/10.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7/recovered.edits/10.seqid 2024-12-02T23:11:31,702 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/recovered.edits/6.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9/recovered.edits/6.seqid 2024-12-02T23:11:31,703 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from 
FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/recovered.edits/10.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1/recovered.edits/10.seqid 2024-12-02T23:11:31,703 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/05ddcad59d270acecb26f8927fa92fb7 2024-12-02T23:11:31,703 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/62aeb8e89e443c08bcf2c307416afdb9 2024-12-02T23:11:31,703 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportFileSystemStateWithSplitRegion/3e0e18998b7a6d2647649ac0e22e86e1 2024-12-02T23:11:31,704 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-02T23:11:31,707 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-02T23:11:31,719 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-02T23:11:31,726 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-02T23:11:31,728 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,728 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-02T23:11:31,728 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181091728"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:31,728 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181091728"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:31,729 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181091728"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:31,731 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-02T23:11:31,731 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 62aeb8e89e443c08bcf2c307416afdb9, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180961818.62aeb8e89e443c08bcf2c307416afdb9.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 3e0e18998b7a6d2647649ac0e22e86e1, NAME => 'testExportFileSystemStateWithSplitRegion,,1733180970452.3e0e18998b7a6d2647649ac0e22e86e1.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 05ddcad59d270acecb26f8927fa92fb7, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733180970452.05ddcad59d270acecb26f8927fa92fb7.', STARTKEY => '5', ENDKEY => ''}] 2024-12-02T23:11:31,732 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
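The DISABLE (pid=31) and DELETE (pid=37) procedures traced above are the server side of two synchronous Admin calls; by the time DeleteTableProcedure finishes, the region directories have been archived and the region rows removed from hbase:meta, as logged. A minimal client-side sketch of the same sequence, assuming connection settings come from the surrounding configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; each call blocks
          // until the corresponding master procedure (DisableTableProcedure /
          // DeleteTableProcedure seen in the log) completes.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          admin.deleteTable(table);
        }
      }
    }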
2024-12-02T23:11:31,732 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181091732"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:31,734 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-02T23:11:31,735 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,736 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 69 msec 2024-12-02T23:11:31,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-02T23:11:31,793 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,793 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:11:31,794 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:31,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T23:11:31,798 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181091798"}]},"ts":"1733181091798"} 2024-12-02T23:11:31,800 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-02T23:11:31,800 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T23:11:31,801 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-02T23:11:31,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, UNASSIGN}] 2024-12-02T23:11:31,804 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, UNASSIGN 2024-12-02T23:11:31,804 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, UNASSIGN 2024-12-02T23:11:31,805 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=3de09e2f50b4bb1a4fe80eb412d983f6, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:11:31,805 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=047f49362811e322054c938a41b68e9e, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:11:31,807 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, UNASSIGN because future has completed 2024-12-02T23:11:31,808 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:11:31,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:11:31,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, UNASSIGN because future has completed 2024-12-02T23:11:31,809 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:11:31,809 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:11:31,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T23:11:31,961 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:11:31,961 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:11:31,961 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 3de09e2f50b4bb1a4fe80eb412d983f6, disabling compactions & flushes 2024-12-02T23:11:31,961 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:11:31,962 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:11:31,962 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. after waiting 0 ms 2024-12-02T23:11:31,962 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:11:31,964 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54545, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:11:31,965 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 047f49362811e322054c938a41b68e9e 2024-12-02T23:11:31,965 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:11:31,965 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 047f49362811e322054c938a41b68e9e, disabling compactions & flushes 2024-12-02T23:11:31,965 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:11:31,965 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 2024-12-02T23:11:31,965 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. after waiting 0 ms 2024-12-02T23:11:31,965 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:11:31,969 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:11:31,970 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:11:31,970 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6. 2024-12-02T23:11:31,970 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 3de09e2f50b4bb1a4fe80eb412d983f6: Waiting for close lock at 1733181091961Running coprocessor pre-close hooks at 1733181091961Disabling compacts and flushes for region at 1733181091961Disabling writes for close at 1733181091962 (+1 ms)Writing region close event to WAL at 1733181091962Running coprocessor post-close hooks at 1733181091970 (+8 ms)Closed at 1733181091970 2024-12-02T23:11:31,972 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:11:31,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:11:31,973 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=3de09e2f50b4bb1a4fe80eb412d983f6, regionState=CLOSED 2024-12-02T23:11:31,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:11:31,973 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e. 
2024-12-02T23:11:31,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 047f49362811e322054c938a41b68e9e: Waiting for close lock at 1733181091965Running coprocessor pre-close hooks at 1733181091965Disabling compacts and flushes for region at 1733181091965Disabling writes for close at 1733181091965Writing region close event to WAL at 1733181091968 (+3 ms)Running coprocessor post-close hooks at 1733181091973 (+5 ms)Closed at 1733181091973 2024-12-02T23:11:31,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:11:31,976 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 047f49362811e322054c938a41b68e9e 2024-12-02T23:11:31,977 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=047f49362811e322054c938a41b68e9e, regionState=CLOSED 2024-12-02T23:11:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:11:31,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-02T23:11:31,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 3de09e2f50b4bb1a4fe80eb412d983f6, server=9e5ab3b623d7,41311,1733180945828 in 169 msec 2024-12-02T23:11:31,982 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3de09e2f50b4bb1a4fe80eb412d983f6, UNASSIGN in 176 msec 2024-12-02T23:11:31,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-02T23:11:31,984 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 047f49362811e322054c938a41b68e9e, server=9e5ab3b623d7,38049,1733180945779 in 173 msec 2024-12-02T23:11:31,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-02T23:11:31,986 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=047f49362811e322054c938a41b68e9e, UNASSIGN in 181 msec 2024-12-02T23:11:31,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-02T23:11:31,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 186 msec 2024-12-02T23:11:31,991 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181091991"}]},"ts":"1733181091991"} 2024-12-02T23:11:31,993 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T23:11:31,993 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T23:11:31,995 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 199 msec 2024-12-02T23:11:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T23:11:32,112 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:11:32,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,115 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,116 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,118 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,120 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e 2024-12-02T23:11:32,120 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:11:32,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T23:11:32,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T23:11:32,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T23:11:32,123 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T23:11:32,123 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/recovered.edits] 2024-12-02T23:11:32,124 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/recovered.edits] 2024-12-02T23:11:32,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-02T23:11:32,132 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c 2024-12-02T23:11:32,135 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/cf/b7349dac625a4ff0927263c7fbaf6c64 2024-12-02T23:11:32,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e/recovered.edits/9.seqid 2024-12-02T23:11:32,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/047f49362811e322054c938a41b68e9e 2024-12-02T23:11:32,141 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/recovered.edits/9.seqid to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6/recovered.edits/9.seqid 2024-12-02T23:11:32,141 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSplitRegion/3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:11:32,142 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-02T23:11:32,142 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-02T23:11:32,143 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-12-02T23:11:32,148 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241202a5585914680a4d11a1e81b78be14c49f_3de09e2f50b4bb1a4fe80eb412d983f6 2024-12-02T23:11:32,150 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241202015af06e493b45eea7f3120f42d22377_047f49362811e322054c938a41b68e9e 2024-12-02T23:11:32,150 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-12-02T23:11:32,153 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,156 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-02T23:11:32,158 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 
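The HFileArchiver entries above move store files and mob files out of the table's data directories into the cluster archive, keeping the table/region/family/file layout and only re-rooting the path under archive/ (mob files lose their mobdir/ prefix in the process). The standalone sketch below reproduces that path convention exactly as it appears in these "Archived from ... to ..." pairs; it is an illustration of the naming scheme, not HBase's HFileArchiver implementation, and the root directory value is copied from the log.

public final class ArchivePathSketch {
  // Derives the archive location for a file under <rootDir>/data/... or <rootDir>/mobdir/data/...,
  // mirroring the "Archived from ... to ..." pairs logged above: the relative layout is kept
  // and the path is re-rooted under <rootDir>/archive/.
  static String toArchivePath(String rootDir, String dataPath) {
    String root = rootDir.endsWith("/") ? rootDir : rootDir + "/";
    if (!dataPath.startsWith(root)) {
      throw new IllegalArgumentException("not under " + root);
    }
    String rel = dataPath.substring(root.length());
    // Mob files live under <root>/mobdir/data/... but are archived under <root>/archive/data/...
    if (rel.startsWith("mobdir/")) {
      rel = rel.substring("mobdir/".length());
    }
    if (!rel.startsWith("data/")) {
      throw new IllegalArgumentException("expected a data/ path, got " + rel);
    }
    return root + "archive/" + rel;
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156";
    String storeFile = root + "/data/default/testtb-testExportFileSystemStateWithSplitRegion/"
        + "047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c";
    // Prints .../archive/data/default/.../047f49362811e322054c938a41b68e9e/cf/53ed2530e40342189507eac752e5c90c,
    // matching the destination path recorded in the log.
    System.out.println(toArchivePath(root, storeFile));
  }
}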
2024-12-02T23:11:32,159 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,160 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-12-02T23:11:32,160 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181092160"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:32,160 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181092160"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:32,163 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:11:32,163 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 047f49362811e322054c938a41b68e9e, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733180958530.047f49362811e322054c938a41b68e9e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3de09e2f50b4bb1a4fe80eb412d983f6, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733180958530.3de09e2f50b4bb1a4fe80eb412d983f6.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:11:32,163 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-02T23:11:32,163 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181092163"}]},"ts":"9223372036854775807"} 2024-12-02T23:11:32,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-02T23:11:32,165 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 53 msec 2024-12-02T23:11:32,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-02T23:11:32,232 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,232 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T23:11:32,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T23:11:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T23:11:32,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,256 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T23:11:32,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:32,295 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=752 (was 711) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1404 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:51480 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:45290 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-438134689_1 at /127.0.0.1:48808 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43971 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-438134689_1 at /127.0.0.1:51462 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 21136) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:48824 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:43971 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=773 (was 759) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=681 (was 732), ProcessCount=19 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=559 (was 5985) 2024-12-02T23:11:32,296 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=752 is superior to 500 2024-12-02T23:11:32,313 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=752, OpenFileDescriptor=773, MaxFileDescriptor=1048576, SystemLoadAverage=681, ProcessCount=19, AvailableMemoryMB=558 2024-12-02T23:11:32,313 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=752 is superior to 500 2024-12-02T23:11:32,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:11:32,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:11:32,317 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:11:32,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-02T23:11:32,318 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:11:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 
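For reference, the create-table request logged above by master.HMaster$4(2454) could be issued from a client roughly as follows. This is a minimal sketch against the standard HBase 2.x+ Admin API, not the test's own code: the connection setup, class name, and error handling are assumptions, while the table name, the 'cf' family, the MOB settings (IS_MOB => 'true', MOB_THRESHOLD => '0'), VERSIONS => '1', and the single split key '1' are taken from the descriptor and the two regions ('' to '1', '1' to '') that appear in the entries below.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical client-side sketch of the logged create-table request.
    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        // Cluster settings are assumed to come from hbase-site.xml on the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Column family 'cf' with the attributes visible in the logged descriptor.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)      // IS_MOB => 'true'
              .setMobThreshold(0L)      // MOB_THRESHOLD => '0': every cell goes to MOB storage
              .setMaxVersions(1)        // VERSIONS => '1'
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(cf)
              .build();
          // One split key at '1' yields the two regions seen in the log:
          // STARTKEY '' -> ENDKEY '1' and STARTKEY '1' -> ENDKEY ''.
          admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
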
2024-12-02T23:11:32,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741911_1087 (size=442) 2024-12-02T23:11:32,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741911_1087 (size=442) 2024-12-02T23:11:32,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741911_1087 (size=442) 2024-12-02T23:11:32,328 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => af79d9ce777258b19189eb2d2a51bfa6, NAME => 'testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:11:32,329 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e4f24037e0492bd7433eae013e6abc0e, NAME => 'testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:11:32,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741912_1088 (size=67) 2024-12-02T23:11:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741912_1088 (size=67) 2024-12-02T23:11:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741912_1088 (size=67) 2024-12-02T23:11:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741913_1089 (size=67) 2024-12-02T23:11:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741913_1089 (size=67) 2024-12-02T23:11:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741913_1089 (size=67) 2024-12-02T23:11:32,338 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated 
testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:11:32,338 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing af79d9ce777258b19189eb2d2a51bfa6, disabling compactions & flushes 2024-12-02T23:11:32,338 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. after waiting 0 ms 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,339 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for af79d9ce777258b19189eb2d2a51bfa6: Waiting for close lock at 1733181092338Disabling compacts and flushes for region at 1733181092338Disabling writes for close at 1733181092339 (+1 ms)Writing region close event to WAL at 1733181092339Closed at 1733181092339 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing e4f24037e0492bd7433eae013e6abc0e, disabling compactions & flushes 2024-12-02T23:11:32,339 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. after waiting 0 ms 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 
2024-12-02T23:11:32,339 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:32,339 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for e4f24037e0492bd7433eae013e6abc0e: Waiting for close lock at 1733181092339Disabling compacts and flushes for region at 1733181092339Disabling writes for close at 1733181092339Writing region close event to WAL at 1733181092339Closed at 1733181092339 2024-12-02T23:11:32,340 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:11:32,341 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733181092341"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181092341"}]},"ts":"1733181092341"} 2024-12-02T23:11:32,341 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733181092341"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181092341"}]},"ts":"1733181092341"} 2024-12-02T23:11:32,343 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:11:32,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:11:32,344 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181092344"}]},"ts":"1733181092344"} 2024-12-02T23:11:32,346 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-02T23:11:32,346 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:11:32,347 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:11:32,348 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:11:32,348 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:11:32,348 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:11:32,348 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of 
racks=1 2024-12-02T23:11:32,348 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, ASSIGN}] 2024-12-02T23:11:32,349 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, ASSIGN 2024-12-02T23:11:32,349 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, ASSIGN 2024-12-02T23:11:32,350 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:11:32,350 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:11:32,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T23:11:32,501 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:11:32,501 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=af79d9ce777258b19189eb2d2a51bfa6, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:11:32,501 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e4f24037e0492bd7433eae013e6abc0e, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:11:32,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, ASSIGN because future has completed 2024-12-02T23:11:32,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:11:32,505 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, ASSIGN because future has completed 2024-12-02T23:11:32,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:11:32,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T23:11:32,658 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43067, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:11:32,662 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:32,663 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => e4f24037e0492bd7433eae013e6abc0e, NAME => 'testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:11:32,663 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. service=AccessControlService 2024-12-02T23:11:32,664 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:11:32,664 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,664 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:11:32,664 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,664 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,666 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,666 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => af79d9ce777258b19189eb2d2a51bfa6, NAME => 'testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:11:32,667 INFO [StoreOpener-e4f24037e0492bd7433eae013e6abc0e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,667 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. service=AccessControlService 2024-12-02T23:11:32,667 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:11:32,667 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,667 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:11:32,667 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,668 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,669 INFO [StoreOpener-e4f24037e0492bd7433eae013e6abc0e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e4f24037e0492bd7433eae013e6abc0e columnFamilyName cf 2024-12-02T23:11:32,670 INFO [StoreOpener-af79d9ce777258b19189eb2d2a51bfa6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,672 INFO [StoreOpener-af79d9ce777258b19189eb2d2a51bfa6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af79d9ce777258b19189eb2d2a51bfa6 columnFamilyName cf 2024-12-02T23:11:32,673 DEBUG [StoreOpener-af79d9ce777258b19189eb2d2a51bfa6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:32,674 DEBUG [StoreOpener-e4f24037e0492bd7433eae013e6abc0e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:32,679 INFO [StoreOpener-e4f24037e0492bd7433eae013e6abc0e-1 {}] 
regionserver.HStore(327): Store=e4f24037e0492bd7433eae013e6abc0e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:11:32,680 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,681 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,682 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,683 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,683 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,684 INFO [StoreOpener-af79d9ce777258b19189eb2d2a51bfa6-1 {}] regionserver.HStore(327): Store=af79d9ce777258b19189eb2d2a51bfa6/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:11:32,685 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,686 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,688 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,689 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,689 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:11:32,689 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened e4f24037e0492bd7433eae013e6abc0e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65071467, jitterRate=-0.030359581112861633}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:11:32,690 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:32,690 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,690 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,691 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for e4f24037e0492bd7433eae013e6abc0e: Running coprocessor pre-open hook at 1733181092664Writing region info on filesystem at 1733181092664Initializing all the Stores at 1733181092666 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181092666Cleaning up temporary data from old regions at 1733181092683 (+17 ms)Running coprocessor post-open hooks at 1733181092690 (+7 ms)Region opened successfully at 1733181092691 (+1 ms) 2024-12-02T23:11:32,692 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e., pid=49, masterSystemTime=1733181092657 2024-12-02T23:11:32,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,696 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:32,696 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 
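The region-open journal above repeats the column-family settings the test table was created with (NAME => 'cf', VERSIONS => '1', BLOOMFILTER => 'ROW', IS_MOB => 'true', MOB_THRESHOLD => '0', 64 KB blocks) and shows two regions, one starting at the empty key and one at row key '1'. A minimal sketch, assuming the standard HBase 2.x client API and a default Configuration, of how such a table could be declared; the class name and connection setup are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                  // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                  .setMobEnabled(true)                // IS_MOB => 'true'
                  .setMobThreshold(0L)                // MOB_THRESHOLD => '0'
                  .setBlocksize(64 * 1024)            // BLOCKSIZE => 64 KB
                  .build())
              .build();
          // One split key at "1" yields the two regions seen in the log above.
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }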
2024-12-02T23:11:32,696 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e4f24037e0492bd7433eae013e6abc0e, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:11:32,706 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:11:32,707 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened af79d9ce777258b19189eb2d2a51bfa6; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67683165, jitterRate=0.008557751774787903}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:11:32,707 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:32,707 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for af79d9ce777258b19189eb2d2a51bfa6: Running coprocessor pre-open hook at 1733181092668Writing region info on filesystem at 1733181092668Initializing all the Stores at 1733181092669 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181092669Cleaning up temporary data from old regions at 1733181092690 (+21 ms)Running coprocessor post-open hooks at 1733181092707 (+17 ms)Region opened successfully at 1733181092707 2024-12-02T23:11:32,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:11:32,708 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6., pid=48, masterSystemTime=1733181092656 2024-12-02T23:11:32,713 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=af79d9ce777258b19189eb2d2a51bfa6, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:11:32,714 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:32,714 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
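Once both OpenRegionProcedures report back, hbase:meta carries regionState=OPEN and an openSeqNum for each region, as logged above. A short sketch, assuming a default client Configuration, of how the same assignments can be observed through the public RegionLocator API (class name is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionAssignments {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithTargetName"))) {
          // Expect two locations here, mirroring the two OPEN rows written to hbase:meta above.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }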
2024-12-02T23:11:32,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-12-02T23:11:32,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779 in 207 msec 2024-12-02T23:11:32,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:11:32,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, ASSIGN in 367 msec 2024-12-02T23:11:32,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-12-02T23:11:32,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622 in 215 msec 2024-12-02T23:11:32,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-12-02T23:11:32,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, ASSIGN in 375 msec 2024-12-02T23:11:32,728 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:11:32,728 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181092728"}]},"ts":"1733181092728"} 2024-12-02T23:11:32,734 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-02T23:11:32,734 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:11:32,735 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-02T23:11:32,738 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T23:11:32,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,740 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:11:32,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:32,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:32,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:32,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T23:11:32,747 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 430 msec 2024-12-02T23:11:32,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T23:11:32,942 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T23:11:32,942 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:32,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:32,952 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-02T23:11:32,952 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
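The PermissionStorage and ZKPermissionWatcher entries above show an ACL of jenkins: RWXCA being stored for the new table and fanned out to every region server through the /hbase/acl znode; in this run it is written automatically for the table creator during CreateTableProcedure. For illustration only, an equivalent explicit grant through AccessControlClient might look roughly like this (user name taken from the log, connection setup assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN on the whole table
          // (family and qualifier left null).
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportWithTargetName"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }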
2024-12-02T23:11:32,952 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:11:32,954 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:32,960 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:32,966 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43736, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:32,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:32,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T23:11:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181092971 (current time:1733181092971). 2024-12-02T23:11:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:11:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T23:11:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:11:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@751f0274, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:11:32,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:11:32,973 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:11:32,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:11:32,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:11:32,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3def1348, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:11:32,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:11:32,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,974 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40832, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:11:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1059ba30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:11:32,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:11:32,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:32,977 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38460, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:32,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:11:32,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:11:32,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,979 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:11:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@266a8609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:11:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:11:32,983 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:11:32,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:11:32,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:11:32,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7dbfea0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:11:32,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:11:32,984 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,984 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40862, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:11:32,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@188d26e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:32,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:11:32,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:11:32,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:32,989 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38476, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:32,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:11:32,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:32,993 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:32,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:11:32,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:11:32,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:32,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T23:11:32,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:11:32,997 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
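The block above is the client-visible start of the first snapshot: MasterRpcServices receives a FLUSH-type request for emptySnaptb0-testExportWithTargetName, reads the table ACL while validating the description, and hands the request to SnapshotManager. On the client side this corresponds to a single blocking Admin call, sketched here under the assumption of a default Configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure completes;
          // for an enabled table this takes a FLUSH-type snapshot, as in the log.
          admin.snapshot("emptySnaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }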
2024-12-02T23:11:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T23:11:32,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T23:11:32,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T23:11:32,999 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:11:33,002 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:11:33,004 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:11:33,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741914_1090 (size=167) 2024-12-02T23:11:33,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741914_1090 (size=167) 2024-12-02T23:11:33,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741914_1090 (size=167) 2024-12-02T23:11:33,020 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:11:33,020 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e}] 2024-12-02T23:11:33,025 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,025 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,102 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T23:11:33,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-02T23:11:33,177 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for af79d9ce777258b19189eb2d2a51bfa6: 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for e4f24037e0492bd7433eae013e6abc0e: 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T23:11:33,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:11:33,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:11:33,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T23:11:33,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:11:33,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:11:33,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741916_1092 (size=70) 2024-12-02T23:11:33,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741915_1091 (size=70) 2024-12-02T23:11:33,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741915_1091 (size=70) 2024-12-02T23:11:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741916_1092 (size=70) 2024-12-02T23:11:33,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741916_1092 (size=70) 2024-12-02T23:11:33,189 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:33,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-02T23:11:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741915_1091 (size=70) 2024-12-02T23:11:33,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
2024-12-02T23:11:33,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-02T23:11:33,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-02T23:11:33,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-02T23:11:33,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,190 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,190 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,191 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,193 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 in 172 msec 2024-12-02T23:11:33,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-12-02T23:11:33,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e in 172 msec 2024-12-02T23:11:33,194 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:11:33,195 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:11:33,196 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:11:33,196 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:11:33,196 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:33,197 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:11:33,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741917_1093 (size=62) 2024-12-02T23:11:33,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741917_1093 (size=62) 2024-12-02T23:11:33,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741917_1093 (size=62) 2024-12-02T23:11:33,208 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:11:33,209 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-02T23:11:33,209 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-02T23:11:33,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741918_1094 (size=649) 2024-12-02T23:11:33,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741918_1094 (size=649) 2024-12-02T23:11:33,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741918_1094 (size=649) 2024-12-02T23:11:33,222 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:11:33,228 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:11:33,228 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-02T23:11:33,230 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=50, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:11:33,230 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T23:11:33,232 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 234 msec 2024-12-02T23:11:33,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T23:11:33,312 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T23:11:33,319 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:11:33,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:11:33,324 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:33,328 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-02T23:11:33,328 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
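After the empty snapshot finishes, the test loads rows into both regions; the HRegion(8528) warnings above note that these writes arrive with the WAL disabled, so they are durable only once a memstore flush happens. A rough sketch of a client write with that durability setting (row keys, qualifier, and values are made up for illustration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadRowsSkipWal {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          for (int i = 0; i < 50; i++) {
            Put put = new Put(Bytes.toBytes(String.format("%03d", i)));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
            // Produces the "writing data ... with WAL disabled" warning seen above:
            // the edit skips the write-ahead log and is lost if the server crashes before a flush.
            put.setDurability(Durability.SKIP_WAL);
            table.put(put);
          }
        }
      }
    }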
2024-12-02T23:11:33,328 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:11:33,330 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:33,335 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:33,341 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T23:11:33,344 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T23:11:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181093344 (current time:1733181093344). 2024-12-02T23:11:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:11:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T23:11:33,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:11:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b1f7126, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:11:33,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:11:33,346 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:11:33,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:11:33,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:11:33,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@aa7d547, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-02T23:11:33,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:11:33,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:11:33,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,348 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:11:33,348 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45aab96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:33,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:11:33,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:11:33,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:33,350 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:33,351 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 
2024-12-02T23:11:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:11:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,352 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:11:33,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b2ffd91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:11:33,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:11:33,353 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:11:33,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:11:33,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:11:33,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d42e661, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:33,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:11:33,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:11:33,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,354 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:11:33,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f95cc56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:11:33,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:11:33,356 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:11:33,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:33,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:33,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:11:33,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:11:33,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:11:33,361 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 
2024-12-02T23:11:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:11:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:11:33,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T23:11:33,362 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:11:33,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
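
The handler thread above is the master validating the snapshot request (security check, table ACL read) before it logs "No existing snapshot, attempting snapshot...". On the client side that whole sequence is kicked off by a single Admin call; a minimal sketch, assuming a stock HBase Java client and an hbase-site.xml on the classpath pointing at the cluster (only the snapshot and table names are taken from the log, everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath; in this test it would point at the mini cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Produces the master-side procedure logged above:
          // snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
        }
      }
    }
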
2024-12-02T23:11:33,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T23:11:33,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T23:11:33,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T23:11:33,364 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:11:33,365 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:11:33,368 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:11:33,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741919_1095 (size=162) 2024-12-02T23:11:33,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741919_1095 (size=162) 2024-12-02T23:11:33,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741919_1095 (size=162) 2024-12-02T23:11:33,386 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:11:33,386 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e}] 2024-12-02T23:11:33,388 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,388 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,472 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T23:11:33,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-02T23:11:33,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-02T23:11:33,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:11:33,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing e4f24037e0492bd7433eae013e6abc0e 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-02T23:11:33,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:33,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing af79d9ce777258b19189eb2d2a51bfa6 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-02T23:11:33,583 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
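
The repeated "Checking to see if procedure is done pid=53" entries are the client polling the master while SnapshotProcedure 53 and its per-region subprocedures (pids 54 and 55) run. A hedged sketch of the asynchronous variant of the same call, which makes that polling explicit; the names come from the log, while the poll interval and the use of the async API here are assumptions for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class PollSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          SnapshotDescription snap = new SnapshotDescription(
              "snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"),
              SnapshotType.FLUSH);
          admin.snapshotAsync(snap);                 // returns once the snapshot procedure is submitted
          while (!admin.isSnapshotFinished(snap)) {  // the client-side check behind the pid=53 polling above
            Thread.sleep(200);                       // arbitrary poll interval, assumption for the sketch
          }
        }
      }
    }
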
2024-12-02T23:11:33,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e is 71, key is 23f91b8e9ba2242d80d72b217df44109/cf:q/1733181093321/Put/seqid=0 2024-12-02T23:11:33,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 is 71, key is 07908397fa60058e596f23bfafe307c4/cf:q/1733181093319/Put/seqid=0 2024-12-02T23:11:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741920_1096 (size=7962) 2024-12-02T23:11:33,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741920_1096 (size=7962) 2024-12-02T23:11:33,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741920_1096 (size=7962) 2024-12-02T23:11:33,626 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:33,633 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,634 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/.tmp/cf/8e6ed1cdda994fd5b80bb4adc9fcec11, store: [table=testtb-testExportWithTargetName family=cf region=e4f24037e0492bd7433eae013e6abc0e] 2024-12-02T23:11:33,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/.tmp/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 is 208, key is 1deb52cedec551d613145ba5892af6a75/cf:q/1733181093321/Put/seqid=0 2024-12-02T23:11:33,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741921_1097 (size=5312) 2024-12-02T23:11:33,652 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741921_1097 (size=5312) 2024-12-02T23:11:33,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741921_1097 (size=5312) 2024-12-02T23:11:33,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741922_1098 (size=14339) 2024-12-02T23:11:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741922_1098 (size=14339) 2024-12-02T23:11:33,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741922_1098 (size=14339) 2024-12-02T23:11:33,673 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/.tmp/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 2024-12-02T23:11:33,676 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,678 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/.tmp/cf/2905a0caa5de449f800efda373d4f978, store: [table=testtb-testExportWithTargetName family=cf region=af79d9ce777258b19189eb2d2a51bfa6] 2024-12-02T23:11:33,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/.tmp/cf/2905a0caa5de449f800efda373d4f978 is 208, key is 059b383ddf5f095ea021d5360f17f1af1/cf:q/1733181093319/Put/seqid=0 2024-12-02T23:11:33,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/.tmp/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 2024-12-02T23:11:33,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T23:11:33,687 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11, entries=44, sequenceid=6, filesize=14.0 K 2024-12-02T23:11:33,688 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for e4f24037e0492bd7433eae013e6abc0e in 147ms, sequenceid=6, compaction requested=false 2024-12-02T23:11:33,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for e4f24037e0492bd7433eae013e6abc0e: 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. for snaptb0-testExportWithTargetName completed. 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11] hfiles 2024-12-02T23:11:33,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 for snapshot=snaptb0-testExportWithTargetName 2024-12-02T23:11:33,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741923_1099 (size=6526) 2024-12-02T23:11:33,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741923_1099 (size=6526) 2024-12-02T23:11:33,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741923_1099 (size=6526) 2024-12-02T23:11:33,692 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/.tmp/cf/2905a0caa5de449f800efda373d4f978 2024-12-02T23:11:33,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/.tmp/cf/2905a0caa5de449f800efda373d4f978 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978 2024-12-02T23:11:33,703 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978, entries=6, sequenceid=6, filesize=6.4 K 2024-12-02T23:11:33,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for af79d9ce777258b19189eb2d2a51bfa6 in 162ms, sequenceid=6, compaction requested=false 2024-12-02T23:11:33,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 
af79d9ce777258b19189eb2d2a51bfa6: 2024-12-02T23:11:33,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. for snaptb0-testExportWithTargetName completed. 2024-12-02T23:11:33,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T23:11:33,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:11:33,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978] hfiles 2024-12-02T23:11:33,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978 for snapshot=snaptb0-testExportWithTargetName 2024-12-02T23:11:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741924_1100 (size=109) 2024-12-02T23:11:33,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741924_1100 (size=109) 2024-12-02T23:11:33,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741924_1100 (size=109) 2024-12-02T23:11:33,716 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 
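
Each region flush above writes two files: a regular store file under data/default/testtb-testExportWithTargetName/<region>/cf/ and a MOB file under mobdir/, which indicates the test table's 'cf' family is MOB-enabled. A minimal sketch of creating a table shaped like this one, assuming the standard descriptor builders; the MOB threshold is an illustrative assumption, while the single split key "1" matches the two regions (empty start key and start key "1") seen in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // values over the threshold are written to mobdir/ at flush time
                  .setMobThreshold(0L)   // illustrative assumption: treat every non-empty value as a MOB cell
                  .build())
              .build();
          // One split key "1" gives two regions, matching the two SnapshotRegionProcedures above.
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
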
2024-12-02T23:11:33,716 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-02T23:11:33,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-02T23:11:33,717 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,717 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e4f24037e0492bd7433eae013e6abc0e in 333 msec 2024-12-02T23:11:33,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741925_1101 (size=109) 2024-12-02T23:11:33,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741925_1101 (size=109) 2024-12-02T23:11:33,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:11:33,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741925_1101 (size=109) 2024-12-02T23:11:33,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-02T23:11:33,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-02T23:11:33,725 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=54, resume processing ppid=53 2024-12-02T23:11:33,730 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af79d9ce777258b19189eb2d2a51bfa6 in 341 msec 2024-12-02T23:11:33,730 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:11:33,731 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ 
ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:11:33,732 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T23:11:33,732 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:11:33,732 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:11:33,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6] hfiles 2024-12-02T23:11:33,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:11:33,734 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:11:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741926_1102 (size=293) 2024-12-02T23:11:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741926_1102 (size=293) 2024-12-02T23:11:33,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741926_1102 (size=293) 2024-12-02T23:11:33,748 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:11:33,749 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-02T23:11:33,749 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-02T23:11:33,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to 
blk_1073741927_1103 (size=959) 2024-12-02T23:11:33,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741927_1103 (size=959) 2024-12-02T23:11:33,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741927_1103 (size=959) 2024-12-02T23:11:33,771 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:11:33,778 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:11:33,778 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T23:11:33,780 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:11:33,780 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T23:11:33,781 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 418 msec 2024-12-02T23:11:33,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T23:11:33,992 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T23:11:33,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993 2024-12-02T23:11:33,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:11:34,023 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:11:34,023 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T23:11:34,025 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:11:34,030 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T23:11:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741929_1105 (size=959) 2024-12-02T23:11:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741928_1104 (size=162) 2024-12-02T23:11:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741929_1105 (size=959) 2024-12-02T23:11:34,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741928_1104 (size=162) 2024-12-02T23:11:34,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741928_1104 (size=162) 2024-12-02T23:11:34,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741929_1105 (size=959) 2024-12-02T23:11:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741930_1106 (size=154) 2024-12-02T23:11:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741930_1106 (size=154) 2024-12-02T23:11:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741930_1106 (size=154) 2024-12-02T23:11:34,055 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:34,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:34,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-15004590070597797605.jar 2024-12-02T23:11:35,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T23:11:35,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-02T23:11:35,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:35,204 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T23:11:35,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-17211324098257067483.jar 2024-12-02T23:11:35,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:11:35,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:11:35,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:11:35,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:11:35,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:11:35,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:11:35,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:11:35,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:11:35,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:11:35,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:11:35,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
2024-12-02T23:11:35,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:11:35,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:11:35,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:11:35,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:11:35,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:11:35,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:11:35,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:11:35,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:11:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741931_1107 (size=131440) 2024-12-02T23:11:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741931_1107 (size=131440) 2024-12-02T23:11:35,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741931_1107 (size=131440) 2024-12-02T23:11:35,372 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0001_000001 (auth:SIMPLE) from 127.0.0.1:56522 2024-12-02T23:11:35,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000001/launch_container.sh] 2024-12-02T23:11:35,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000001/container_tokens] 2024-12-02T23:11:35,384 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0001/container_1733180955226_0001_01_000001/sysfs] 2024-12-02T23:11:35,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T23:11:35,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T23:11:35,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741932_1108 (size=4188619) 2024-12-02T23:11:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T23:11:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T23:11:35,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741933_1109 (size=1323991) 2024-12-02T23:11:35,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741934_1110 (size=903926) 2024-12-02T23:11:35,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741934_1110 (size=903926) 2024-12-02T23:11:35,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741934_1110 (size=903926) 2024-12-02T23:11:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741935_1111 (size=8360083) 2024-12-02T23:11:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741935_1111 (size=8360083) 2024-12-02T23:11:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741935_1111 (size=8360083) 2024-12-02T23:11:35,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741936_1112 (size=1877034) 2024-12-02T23:11:35,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741936_1112 (size=1877034) 2024-12-02T23:11:35,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741936_1112 (size=1877034) 2024-12-02T23:11:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741937_1113 (size=6424743) 2024-12-02T23:11:35,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741937_1113 (size=6424743) 2024-12-02T23:11:35,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741937_1113 (size=6424743) 2024-12-02T23:11:35,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741938_1114 (size=77835) 2024-12-02T23:11:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741938_1114 (size=77835) 2024-12-02T23:11:35,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741938_1114 (size=77835) 2024-12-02T23:11:35,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741939_1115 (size=30949) 2024-12-02T23:11:35,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741939_1115 (size=30949) 2024-12-02T23:11:35,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741939_1115 (size=30949) 2024-12-02T23:11:35,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T23:11:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T23:11:35,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741940_1116 (size=1597213) 2024-12-02T23:11:35,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741941_1117 (size=4695811) 2024-12-02T23:11:35,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741941_1117 (size=4695811) 2024-12-02T23:11:35,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741941_1117 (size=4695811) 2024-12-02T23:11:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741942_1118 (size=232957) 2024-12-02T23:11:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741942_1118 (size=232957) 2024-12-02T23:11:35,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741942_1118 (size=232957) 2024-12-02T23:11:35,995 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741943_1119 (size=127628) 2024-12-02T23:11:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741943_1119 (size=127628) 2024-12-02T23:11:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741943_1119 (size=127628) 2024-12-02T23:11:36,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741944_1120 (size=20406) 2024-12-02T23:11:36,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741944_1120 (size=20406) 2024-12-02T23:11:36,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741944_1120 (size=20406) 2024-12-02T23:11:36,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T23:11:36,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T23:11:36,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741945_1121 (size=5175431) 2024-12-02T23:11:36,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741946_1122 (size=217634) 2024-12-02T23:11:36,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741946_1122 (size=217634) 2024-12-02T23:11:36,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741946_1122 (size=217634) 2024-12-02T23:11:36,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741947_1123 (size=443172) 2024-12-02T23:11:36,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741947_1123 (size=443172) 2024-12-02T23:11:36,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741947_1123 (size=443172) 2024-12-02T23:11:36,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741948_1124 (size=1832290) 2024-12-02T23:11:36,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741948_1124 (size=1832290) 2024-12-02T23:11:36,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741948_1124 (size=1832290) 2024-12-02T23:11:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741949_1125 (size=322274) 2024-12-02T23:11:36,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741949_1125 (size=322274) 2024-12-02T23:11:36,076 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741949_1125 (size=322274) 2024-12-02T23:11:36,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741950_1126 (size=503880) 2024-12-02T23:11:36,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741950_1126 (size=503880) 2024-12-02T23:11:36,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741950_1126 (size=503880) 2024-12-02T23:11:36,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741951_1127 (size=29229) 2024-12-02T23:11:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741951_1127 (size=29229) 2024-12-02T23:11:36,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741951_1127 (size=29229) 2024-12-02T23:11:36,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741952_1128 (size=24096) 2024-12-02T23:11:36,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741952_1128 (size=24096) 2024-12-02T23:11:36,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741952_1128 (size=24096) 2024-12-02T23:11:36,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741953_1129 (size=111872) 2024-12-02T23:11:36,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741953_1129 (size=111872) 2024-12-02T23:11:36,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741953_1129 (size=111872) 2024-12-02T23:11:36,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741954_1130 (size=45609) 2024-12-02T23:11:36,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741954_1130 (size=45609) 2024-12-02T23:11:36,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741954_1130 (size=45609) 2024-12-02T23:11:36,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741955_1131 (size=136454) 2024-12-02T23:11:36,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741955_1131 (size=136454) 2024-12-02T23:11:36,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741955_1131 (size=136454) 2024-12-02T23:11:36,132 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
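Note: the JobResourceUploader warning at the end of the entries above ("No job jar file set. User classes may not be found.") is usually benign in a mini-cluster test, where the classes are already on the classpath; in a standalone driver it is normally avoided by pointing the MapReduce Job at the jar containing the user classes. A minimal sketch (class and job names here are illustrative, not taken from this test run):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Minimal driver fragment showing how a job jar is usually set.
public class JobJarSetupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Ship the jar that contains the user classes with the job; leaving this
    // unset is what triggers the "No job jar file set" warning above.
    job.setJarByClass(JobJarSetupSketch.class);
    // Equivalent explicit form: job.setJar("/path/to/user-classes.jar");
  }
}
```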
2024-12-02T23:11:36,135 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-02T23:11:36,137 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.0 K 2024-12-02T23:11:36,137 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.8 K 2024-12-02T23:11:36,137 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.4 K 2024-12-02T23:11:36,137 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.2 K 2024-12-02T23:11:36,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741956_1132 (size=1031) 2024-12-02T23:11:36,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741956_1132 (size=1031) 2024-12-02T23:11:36,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741956_1132 (size=1031) 2024-12-02T23:11:36,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741957_1133 (size=35) 2024-12-02T23:11:36,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741957_1133 (size=35) 2024-12-02T23:11:36,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741957_1133 (size=35) 2024-12-02T23:11:36,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741958_1134 (size=304003) 2024-12-02T23:11:36,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741958_1134 (size=304003) 2024-12-02T23:11:36,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741958_1134 (size=304003) 2024-12-02T23:11:36,209 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:11:36,209 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:11:36,393 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:56534 2024-12-02T23:11:37,055 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:11:44,751 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:39880 2024-12-02T23:11:45,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741959_1135 (size=349701) 2024-12-02T23:11:45,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741959_1135 (size=349701) 2024-12-02T23:11:45,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741959_1135 (size=349701) 2024-12-02T23:11:47,053 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:48454 2024-12-02T23:11:47,053 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:52614 2024-12-02T23:11:47,895 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:48466 2024-12-02T23:11:47,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:52626 2024-12-02T23:11:49,941 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0002_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:11:55,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741960_1136 (size=7962) 2024-12-02T23:11:55,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741960_1136 (size=7962) 2024-12-02T23:11:55,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741960_1136 (size=7962) 2024-12-02T23:11:55,883 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000003/launch_container.sh] 2024-12-02T23:11:55,884 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000003/container_tokens] 2024-12-02T23:11:55,884 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000003/sysfs] 2024-12-02T23:11:57,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741962_1138 (size=5312) 2024-12-02T23:11:57,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741962_1138 (size=5312) 2024-12-02T23:11:57,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741962_1138 (size=5312) 2024-12-02T23:11:58,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741963_1139 (size=6526) 2024-12-02T23:11:58,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741963_1139 (size=6526) 2024-12-02T23:11:58,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741963_1139 (size=6526) 2024-12-02T23:11:58,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741964_1140 (size=14339) 2024-12-02T23:11:58,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741964_1140 (size=14339) 2024-12-02T23:11:58,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741964_1140 (size=14339) 2024-12-02T23:11:58,824 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000004/launch_container.sh] 2024-12-02T23:11:58,824 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000004/container_tokens] 2024-12-02T23:11:58,824 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000004/sysfs] 2024-12-02T23:11:58,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741961_1137 (size=31747) 2024-12-02T23:11:58,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741961_1137 
(size=31747) 2024-12-02T23:11:58,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741961_1137 (size=31747) 2024-12-02T23:11:58,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741965_1141 (size=465) 2024-12-02T23:11:58,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741965_1141 (size=465) 2024-12-02T23:11:58,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741965_1141 (size=465) 2024-12-02T23:11:58,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741966_1142 (size=31747) 2024-12-02T23:11:58,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741966_1142 (size=31747) 2024-12-02T23:11:58,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000002/launch_container.sh] 2024-12-02T23:11:58,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000002/container_tokens] 2024-12-02T23:11:58,941 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000002/sysfs] 2024-12-02T23:11:58,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741966_1142 (size=31747) 2024-12-02T23:11:58,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741967_1143 (size=349701) 2024-12-02T23:11:58,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741967_1143 (size=349701) 2024-12-02T23:11:58,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741967_1143 (size=349701) 2024-12-02T23:11:58,981 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:50234 2024-12-02T23:12:00,532 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:12:00,546 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
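Note: the entries above trace the ExportSnapshot MapReduce job from split planning ("export split=0 size=14.0 K" ...) through "Finalize the Snapshot Export" and the final verification step. Below is a minimal sketch of driving the same tool programmatically; the flag names follow the ExportSnapshot usage text, while the destination URI and mapper count are placeholders to be checked against the HBase version in use:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Sketch: launch ExportSnapshot via ToolRunner; the same arguments are
// normally passed on the command line to `hbase ...snapshot.ExportSnapshot`.
public class ExportSnapshotDriverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName", // source snapshot
        "-target", "testExportWithTargetName",           // name at destination
        "-copy-to", "hdfs://namenode:8020/export-test",  // placeholder URI
        "-mappers", "4"                                  // placeholder parallelism
    });
    System.exit(rc);
  }
}
```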
2024-12-02T23:12:00,614 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-02T23:12:00,614 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:12:00,618 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:12:00,618 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T23:12:00,622 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-02T23:12:00,623 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-02T23:12:00,623 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/testExportWithTargetName 2024-12-02T23:12:00,623 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-02T23:12:00,624 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181093993/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-02T23:12:00,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithTargetName 2024-12-02T23:12:00,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:00,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T23:12:00,654 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181120654"}]},"ts":"1733181120654"} 2024-12-02T23:12:00,658 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-02T23:12:00,659 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-02T23:12:00,660 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-02T23:12:00,663 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, UNASSIGN}] 2024-12-02T23:12:00,664 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, UNASSIGN 2024-12-02T23:12:00,666 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, UNASSIGN 2024-12-02T23:12:00,667 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=e4f24037e0492bd7433eae013e6abc0e, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:00,668 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=af79d9ce777258b19189eb2d2a51bfa6, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:00,677 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, UNASSIGN because future has completed 2024-12-02T23:12:00,678 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:00,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:00,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, UNASSIGN because future has completed 2024-12-02T23:12:00,683 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:00,683 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:12:00,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T23:12:00,838 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:12:00,838 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:00,838 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing e4f24037e0492bd7433eae013e6abc0e, disabling compactions & flushes 2024-12-02T23:12:00,838 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:12:00,838 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:12:00,838 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. after waiting 0 ms 2024-12-02T23:12:00,838 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:12:00,842 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:12:00,842 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:00,842 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing af79d9ce777258b19189eb2d2a51bfa6, disabling compactions & flushes 2024-12-02T23:12:00,842 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:12:00,842 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 2024-12-02T23:12:00,843 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. after waiting 0 ms 2024-12-02T23:12:00,843 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
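Note: the UnassignRegionHandler/HRegion close activity above (pid=60 and pid=61) is the region-server side of the DISABLE request stored earlier as pid=56. From a client, that request is typically issued through the Admin API; a minimal sketch, assuming a reachable cluster configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side call behind the DisableTableProcedure above.
public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      if (!admin.isTableDisabled(table)) {
        // Blocks until the master reports the disable procedure as finished,
        // i.e. until region closes like the ones logged above have completed.
        admin.disableTable(table);
      }
    }
  }
}
```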
2024-12-02T23:12:00,845 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:00,845 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:00,845 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e. 2024-12-02T23:12:00,845 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for e4f24037e0492bd7433eae013e6abc0e: Waiting for close lock at 1733181120838Running coprocessor pre-close hooks at 1733181120838Disabling compacts and flushes for region at 1733181120838Disabling writes for close at 1733181120838Writing region close event to WAL at 1733181120839 (+1 ms)Running coprocessor post-close hooks at 1733181120845 (+6 ms)Closed at 1733181120845 2024-12-02T23:12:00,847 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:12:00,848 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=e4f24037e0492bd7433eae013e6abc0e, regionState=CLOSED 2024-12-02T23:12:00,850 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:00,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-12-02T23:12:00,854 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure e4f24037e0492bd7433eae013e6abc0e, server=9e5ab3b623d7,38049,1733180945779 in 173 msec 2024-12-02T23:12:00,855 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e4f24037e0492bd7433eae013e6abc0e, UNASSIGN in 191 msec 2024-12-02T23:12:00,856 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:00,856 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:00,856 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6. 
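Note: the close journal above mentions "Running coprocessor pre-close hooks" and stopping org.apache.hadoop.hbase.security.access.AccessController. The sketch below shows a custom RegionObserver participating in those same hooks; signatures follow the HBase 2.x coprocessor API and should be verified against the version in use:

```java
import java.util.Optional;

import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

// Hedged sketch: a RegionObserver wired into the pre/post-close hooks that
// the region close journal above refers to (HBase 2.x coprocessor API).
public class CloseLoggingObserver implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public void preClose(ObserverContext<RegionCoprocessorEnvironment> c,
      boolean abortRequested) {
    // Invoked before writes are disabled for the closing region.
    String region =
        c.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString();
    System.out.println("preClose " + region + ", abort=" + abortRequested);
  }

  @Override
  public void postClose(ObserverContext<RegionCoprocessorEnvironment> c,
      boolean abortRequested) {
    // Invoked after the region has been closed and its journal written.
    System.out.println("postClose, abort=" + abortRequested);
  }
}
```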
2024-12-02T23:12:00,856 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for af79d9ce777258b19189eb2d2a51bfa6: Waiting for close lock at 1733181120842Running coprocessor pre-close hooks at 1733181120842Disabling compacts and flushes for region at 1733181120842Disabling writes for close at 1733181120843 (+1 ms)Writing region close event to WAL at 1733181120844 (+1 ms)Running coprocessor post-close hooks at 1733181120856 (+12 ms)Closed at 1733181120856 2024-12-02T23:12:00,858 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:12:00,859 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=af79d9ce777258b19189eb2d2a51bfa6, regionState=CLOSED 2024-12-02T23:12:00,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:12:00,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-12-02T23:12:00,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure af79d9ce777258b19189eb2d2a51bfa6, server=9e5ab3b623d7,39847,1733180945622 in 179 msec 2024-12-02T23:12:00,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=58, resume processing ppid=57 2024-12-02T23:12:00,866 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af79d9ce777258b19189eb2d2a51bfa6, UNASSIGN in 201 msec 2024-12-02T23:12:00,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-02T23:12:00,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 206 msec 2024-12-02T23:12:00,872 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181120872"}]},"ts":"1733181120872"} 2024-12-02T23:12:00,874 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-02T23:12:00,874 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-02T23:12:00,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 230 msec 2024-12-02T23:12:00,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T23:12:00,974 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T23:12:00,975 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithTargetName 2024-12-02T23:12:00,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:00,979 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:00,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-02T23:12:00,980 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:00,988 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:12:00,988 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-02T23:12:00,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T23:12:00,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T23:12:00,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T23:12:00,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T23:12:00,994 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/recovered.edits] 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:00,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T23:12:00,996 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:12:00,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:00,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-02T23:12:01,000 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/cf/2905a0caa5de449f800efda373d4f978 2024-12-02T23:12:01,003 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/recovered.edits] 2024-12-02T23:12:01,008 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/cf/8e6ed1cdda994fd5b80bb4adc9fcec11 2024-12-02T23:12:01,009 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6/recovered.edits/9.seqid 2024-12-02T23:12:01,010 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:12:01,013 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e/recovered.edits/9.seqid 2024-12-02T23:12:01,014 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithTargetName/e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:12:01,014 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-02T23:12:01,015 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-02T23:12:01,016 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-12-02T23:12:01,021 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b20241202ef42b8a1df634cc9a69bf28efd7e21ad_e4f24037e0492bd7433eae013e6abc0e 2024-12-02T23:12:01,024 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202412022cf267244f9b48709dfb42757c48846b_af79d9ce777258b19189eb2d2a51bfa6 2024-12-02T23:12:01,025 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-12-02T23:12:01,028 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:01,032 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-02T23:12:01,036 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-02T23:12:01,037 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:01,037 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-02T23:12:01,038 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181121037"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:01,038 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181121037"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:01,041 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:12:01,041 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => af79d9ce777258b19189eb2d2a51bfa6, NAME => 'testtb-testExportWithTargetName,,1733181092314.af79d9ce777258b19189eb2d2a51bfa6.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e4f24037e0492bd7433eae013e6abc0e, NAME => 'testtb-testExportWithTargetName,1,1733181092314.e4f24037e0492bd7433eae013e6abc0e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:12:01,041 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
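Note: the DeleteTableProcedure entries above show the region directories and MOB files being moved to the archive and the table being removed from hbase:meta; the snapshot deletions are recorded further below. The client-side calls behind this cleanup look roughly like the following sketch (table and snapshot names are taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the Admin calls that drive the DeleteTableProcedure and the
// snapshot deletions recorded after it in this log.
public class DropTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      if (admin.tableExists(table)) {
        admin.deleteTable(table); // the table must already be disabled
      }
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```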
2024-12-02T23:12:01,041 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181121041"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:01,043 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-02T23:12:01,044 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T23:12:01,046 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 69 msec 2024-12-02T23:12:01,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-02T23:12:01,106 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-02T23:12:01,106 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T23:12:01,115 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T23:12:01,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-02T23:12:01,121 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T23:12:01,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-02T23:12:01,173 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=781 (was 752) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:33317 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2008973204_1 at /127.0.0.1:56874 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-2008973204_1 at /127.0.0.1:46690 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25124) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) 
java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32795 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2075 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1062) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) java.base@17.0.11/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33317 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:56884 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:57104 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:46724 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32795 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=789 (was 773) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=796 (was 681) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 19), AvailableMemoryMB=2657 (was 558) - AvailableMemoryMB LEAK? 
- 2024-12-02T23:12:01,174 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-02T23:12:01,201 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=781, OpenFileDescriptor=789, MaxFileDescriptor=1048576, SystemLoadAverage=796, ProcessCount=19, AvailableMemoryMB=2647 2024-12-02T23:12:01,202 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-02T23:12:01,204 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:12:01,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:01,207 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:12:01,209 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:12:01,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-02T23:12:01,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T23:12:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741968_1144 (size=440) 2024-12-02T23:12:01,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741968_1144 (size=440) 2024-12-02T23:12:01,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741968_1144 (size=440) 2024-12-02T23:12:01,288 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 876889227da0811eedb8889a1116e0f3, NAME => 'testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:01,290 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ace763b190e7e1c4038a215dd6005521, NAME => 'testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:01,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T23:12:01,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741970_1146 (size=65) 2024-12-02T23:12:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741970_1146 (size=65) 2024-12-02T23:12:01,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741970_1146 (size=65) 2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing ace763b190e7e1c4038a215dd6005521, disabling compactions & flushes 2024-12-02T23:12:01,346 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. after waiting 0 ms 2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:01,346 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 
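For orientation, the table the master is creating in the entries above ('testtb-testExportWithResetTtl', a single MOB-enabled column family 'cf' with MOB_THRESHOLD => '0' and VERSIONS => '1', pre-split at row key '1' so that the two regions ('' .. '1') and ('1' .. '') exist) corresponds roughly to the client call sketched below. This is a hedged sketch against the public HBase 2.x/3.x Admin API, not the test's own code; the configuration source is assumed to be an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTtlTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes cluster settings come from hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true' in the descriptor logged above
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0'
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setColumnFamily(cf)
          .build();
      // Pre-split at '1' so CreateTableProcedure lays out two regions, as in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td, splitKeys);
    }
  }
}
```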
2024-12-02T23:12:01,346 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for ace763b190e7e1c4038a215dd6005521: Waiting for close lock at 1733181121346Disabling compacts and flushes for region at 1733181121346Disabling writes for close at 1733181121346Writing region close event to WAL at 1733181121346Closed at 1733181121346 2024-12-02T23:12:01,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741969_1145 (size=65) 2024-12-02T23:12:01,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741969_1145 (size=65) 2024-12-02T23:12:01,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741969_1145 (size=65) 2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 876889227da0811eedb8889a1116e0f3, disabling compactions & flushes 2024-12-02T23:12:01,355 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. after waiting 0 ms 2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,355 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
2024-12-02T23:12:01,355 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 876889227da0811eedb8889a1116e0f3: Waiting for close lock at 1733181121355Disabling compacts and flushes for region at 1733181121355Disabling writes for close at 1733181121355Writing region close event to WAL at 1733181121355Closed at 1733181121355 2024-12-02T23:12:01,356 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:12:01,357 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181121356"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181121356"}]},"ts":"1733181121356"} 2024-12-02T23:12:01,357 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181121356"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181121356"}]},"ts":"1733181121356"} 2024-12-02T23:12:01,360 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:12:01,364 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:12:01,365 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181121364"}]},"ts":"1733181121364"} 2024-12-02T23:12:01,367 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T23:12:01,368 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:12:01,372 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:12:01,373 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:12:01,373 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:12:01,373 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:12:01,373 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:12:01,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, ASSIGN}] 2024-12-02T23:12:01,378 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, ASSIGN 2024-12-02T23:12:01,378 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, ASSIGN 2024-12-02T23:12:01,379 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:12:01,380 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:12:01,530 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
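After the two TransitRegionStateProcedure assignments above complete, each region is hosted by a specific region server. A hedged sketch of how a client could confirm the resulting placement is shown below; it assumes an already-open Connection named conn and uses only the standard RegionLocator API, none of it taken from this log.

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class RegionPlacementSketch {
  // Prints region -> server pairs, mirroring the OPENING/OPEN transitions written to hbase:meta above.
  static void printRegionPlacement(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```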
2024-12-02T23:12:01,530 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=876889227da0811eedb8889a1116e0f3, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:12:01,530 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=ace763b190e7e1c4038a215dd6005521, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:01,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T23:12:01,534 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, ASSIGN because future has completed 2024-12-02T23:12:01,534 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:12:01,535 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, ASSIGN because future has completed 2024-12-02T23:12:01,536 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:01,692 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,692 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:01,692 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 876889227da0811eedb8889a1116e0f3, NAME => 'testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:12:01,692 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => ace763b190e7e1c4038a215dd6005521, NAME => 'testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:12:01,692 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. service=AccessControlService 2024-12-02T23:12:01,692 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
service=AccessControlService 2024-12-02T23:12:01,693 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:01,693 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,693 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,694 INFO [StoreOpener-ace763b190e7e1c4038a215dd6005521-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,696 INFO [StoreOpener-ace763b190e7e1c4038a215dd6005521-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single 
output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ace763b190e7e1c4038a215dd6005521 columnFamilyName cf 2024-12-02T23:12:01,696 INFO [StoreOpener-876889227da0811eedb8889a1116e0f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,697 DEBUG [StoreOpener-ace763b190e7e1c4038a215dd6005521-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:01,698 INFO [StoreOpener-876889227da0811eedb8889a1116e0f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 876889227da0811eedb8889a1116e0f3 columnFamilyName cf 2024-12-02T23:12:01,698 INFO [StoreOpener-ace763b190e7e1c4038a215dd6005521-1 {}] regionserver.HStore(327): Store=ace763b190e7e1c4038a215dd6005521/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:01,698 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,698 DEBUG [StoreOpener-876889227da0811eedb8889a1116e0f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:01,699 INFO [StoreOpener-876889227da0811eedb8889a1116e0f3-1 {}] regionserver.HStore(327): Store=876889227da0811eedb8889a1116e0f3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:01,699 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,699 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,699 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,700 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,700 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,700 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,701 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,701 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,701 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,702 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,703 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,705 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:01,705 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened ace763b190e7e1c4038a215dd6005521; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71872453, jitterRate=0.07098300755023956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:01,705 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,706 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for ace763b190e7e1c4038a215dd6005521: Running coprocessor pre-open hook at 1733181121693Writing region info on filesystem at 1733181121693Initializing all the Stores at 1733181121694 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181121694Cleaning up temporary data from old regions at 1733181121700 (+6 ms)Running coprocessor post-open hooks at 1733181121705 (+5 ms)Region opened successfully at 1733181121706 (+1 ms) 2024-12-02T23:12:01,706 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:01,707 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521., pid=67, masterSystemTime=1733181121689 2024-12-02T23:12:01,707 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 876889227da0811eedb8889a1116e0f3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68419125, jitterRate=0.019524410367012024}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:01,707 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:01,707 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 876889227da0811eedb8889a1116e0f3: Running coprocessor pre-open hook at 1733181121693Writing region info on filesystem at 1733181121693Initializing all the Stores at 1733181121694 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181121694Cleaning up temporary data from old regions at 1733181121701 (+7 ms)Running coprocessor post-open hooks at 1733181121707 (+6 ms)Region opened successfully at 1733181121707 2024-12-02T23:12:01,708 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3., pid=66, masterSystemTime=1733181121688 2024-12-02T23:12:01,709 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:01,709 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 
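With both regions opened and their open journals written, the test only has to wait for the table to become available before using it (the "Waiting up to [60,000] milli-secs" entry further down). A hedged, simplified sketch of that kind of wait loop follows; the polling interval and error handling are assumptions, not taken from the test utilities.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
  // Polls Admin.isTableAvailable until the new table's regions are all online or the timeout passes.
  static void waitUntilAvailable(Admin admin, String name, long timeoutMs) throws Exception {
    TableName table = TableName.valueOf(name);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("Table " + name + " not available within " + timeoutMs + " ms");
      }
      Thread.sleep(200); // hypothetical poll interval
    }
  }
}
```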
2024-12-02T23:12:01,709 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=ace763b190e7e1c4038a215dd6005521, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:01,710 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,710 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,710 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=876889227da0811eedb8889a1116e0f3, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:12:01,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:01,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:12:01,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-02T23:12:01,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779 in 177 msec 2024-12-02T23:12:01,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-02T23:12:01,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828 in 180 msec 2024-12-02T23:12:01,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, ASSIGN in 342 msec 2024-12-02T23:12:01,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-12-02T23:12:01,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, ASSIGN in 344 msec 2024-12-02T23:12:01,719 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:12:01,719 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181121719"}]},"ts":"1733181121719"} 2024-12-02T23:12:01,721 INFO [PEWorker-4 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T23:12:01,721 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:12:01,722 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-02T23:12:01,725 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T23:12:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:01,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:01,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:01,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:01,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:01,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:01,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 526 msec 2024-12-02T23:12:01,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T23:12:01,843 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T23:12:01,843 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): 
Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:01,846 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-02T23:12:01,846 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:01,846 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:01,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:01,854 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:01,858 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43546, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:01,861 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:01,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T23:12:01,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181121864 (current time:1733181121864). 
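The master entries above record a FLUSH-type snapshot request named emptySnaptb0-testExportWithResetTtl against testtb-testExportWithResetTtl. On the client side this corresponds roughly to the Admin call sketched below; it is a hedged sketch, and the ttl=0 default, VERSION, and owner handling seen in the log are applied server-side rather than set here.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeEmptySnapshotSketch {
  // Assumes an Admin handle obtained elsewhere; names match the snapshot request logged above.
  static void takeSnapshot(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportWithResetTtl",
        TableName.valueOf("testtb-testExportWithResetTtl"),
        SnapshotType.FLUSH); // type=FLUSH in the logged request
  }
}
```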
2024-12-02T23:12:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T23:12:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:01,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@734fbf1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:01,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:01,867 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:01,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:01,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:01,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ee3ca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:01,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:01,868 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,869 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:01,870 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22f060c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:01,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:01,871 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:01,873 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:01,874 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:12:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b663699, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:01,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:01,877 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
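(Hedged sketch: the call stack captured above bottoms out in SnapshotDescriptionUtils.isSecurityAvailable, which opens and immediately closes a short-lived connection; as far as this log shows, that call only decides whether ACLs need to be copied into the snapshot. Assuming the check amounts to "does hbase:acl exist", a rough public-API equivalent could look like this; the helper name is ours, not HBase's.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Assumed equivalent of the isSecurityAvailable step seen in the stack trace above.
    static boolean aclTablePresent(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        return admin.tableExists(TableName.valueOf("hbase:acl"));
      }
    }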
2024-12-02T23:12:01,877 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:01,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:01,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:01,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c0df0e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,879 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:01,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:01,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,882 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:01,883 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@902a279, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:01,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:01,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:01,884 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:01,885 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:01,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:01,887 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:01,888 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-02T23:12:01,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:12:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:01,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T23:12:01,890 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
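(Hedged sketch: the "Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]" record shows the table permissions that writeAclToSnapshotDescription copies into the snapshot. From client code the same information is usually read with AccessControlClient; the table name comes from the log, the helper name and everything else are assumptions.)

    import java.util.List;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    // Prints the per-user permissions stored for the test table, e.g. jenkins with RWXCA.
    static void dumpTableAcls(Connection conn) throws Throwable {
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithResetTtl");
      for (UserPermission p : perms) {
        System.out.println(p);
      }
    }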
2024-12-02T23:12:01,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:12:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T23:12:01,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T23:12:01,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T23:12:01,894 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:01,895 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:01,898 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:01,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741971_1147 (size=161) 2024-12-02T23:12:01,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741971_1147 (size=161) 2024-12-02T23:12:01,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741971_1147 (size=161) 2024-12-02T23:12:01,908 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:01,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521}] 2024-12-02T23:12:01,909 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:01,909 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T23:12:02,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-02T23:12:02,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-02T23:12:02,061 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:02,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for ace763b190e7e1c4038a215dd6005521: 2024-12-02T23:12:02,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T23:12:02,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:02,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:02,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 876889227da0811eedb8889a1116e0f3: 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:02,063 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:02,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741972_1148 (size=68) 2024-12-02T23:12:02,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741972_1148 (size=68) 2024-12-02T23:12:02,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741972_1148 (size=68) 2024-12-02T23:12:02,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:02,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-02T23:12:02,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-02T23:12:02,079 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,079 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741973_1149 (size=68) 2024-12-02T23:12:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741973_1149 (size=68) 2024-12-02T23:12:02,087 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 in 177 msec 2024-12-02T23:12:02,094 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
2024-12-02T23:12:02,094 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-02T23:12:02,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-02T23:12:02,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741973_1149 (size=68) 2024-12-02T23:12:02,097 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=69, resume processing ppid=68 2024-12-02T23:12:02,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 in 193 msec 2024-12-02T23:12:02,110 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:02,111 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:02,112 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:12:02,112 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:02,112 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:02,113 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:12:02,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741974_1150 (size=60) 2024-12-02T23:12:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741974_1150 (size=60) 2024-12-02T23:12:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741974_1150 (size=60) 2024-12-02T23:12:02,126 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:02,126 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:02,126 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:02,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741975_1151 (size=641) 2024-12-02T23:12:02,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741975_1151 (size=641) 2024-12-02T23:12:02,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741975_1151 (size=641) 2024-12-02T23:12:02,141 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:02,147 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:02,148 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:02,149 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:02,150 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T23:12:02,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 259 msec 2024-12-02T23:12:02,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T23:12:02,213 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T23:12:02,220 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:02,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:02,224 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:02,227 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-02T23:12:02,227 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
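(Hedged sketch: the two "writing data to region ... with WAL disabled" warnings above are what a region server prints when the client's mutations skip the write-ahead log. A minimal example of such a write follows; the table, family 'cf' and qualifier 'q' come from the log, while the row key and value are invented.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static void putWithoutWal(Connection conn) throws Exception {
      try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
        Put put = new Put(Bytes.toBytes("row-0"));                 // row key is invented
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
        put.setDurability(Durability.SKIP_WAL);                    // triggers the warning seen above
        table.put(put);
      }
    }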
2024-12-02T23:12:02,228 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:02,233 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:02,238 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:02,245 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:02,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T23:12:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181122249 (current time:1733181122249). 2024-12-02T23:12:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T23:12:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2adbbc67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:02,252 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:02,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:02,252 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:02,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772c59ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T23:12:02,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:02,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:02,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,254 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:02,255 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3573b46e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:02,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:02,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:02,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:02,258 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:02,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,260 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:02,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@792c7185, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:02,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:02,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:02,262 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:02,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:02,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:02,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174a347e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:02,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:02,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:02,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,264 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:02,265 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@294c49b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:02,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:02,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:02,267 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:02,267 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:02,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:02,270 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:02,271 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:02,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T23:12:02,273 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:02,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
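(Hedged sketch: as with pid=68 earlier, the client keeps asking the master "Checking to see if procedure is done pid=71" until the SnapshotProcedure finishes. Once the blocking admin.snapshot(...) call returns, the result can be double-checked by listing snapshots; the snapshot name below comes from the log, the helper name is assumed.)

    import java.util.List;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    // Returns true if a snapshot with the given name is visible to the master.
    static boolean snapshotExists(Admin admin, String name) throws Exception {
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      return snapshots.stream().anyMatch(s -> name.equals(s.getName()));
    }
    // e.g. snapshotExists(admin, "snaptb0-testExportWithResetTtl")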
2024-12-02T23:12:02,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T23:12:02,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T23:12:02,275 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:02,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T23:12:02,276 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:02,279 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:02,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741976_1152 (size=156) 2024-12-02T23:12:02,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741976_1152 (size=156) 2024-12-02T23:12:02,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741976_1152 (size=156) 2024-12-02T23:12:02,290 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:02,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521}] 2024-12-02T23:12:02,291 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,291 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T23:12:02,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-02T23:12:02,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:02,444 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing ace763b190e7e1c4038a215dd6005521 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-02T23:12:02,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-02T23:12:02,445 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:02,445 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 876889227da0811eedb8889a1116e0f3 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-02T23:12:02,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 is 71, key is 04572f3f1ebd1405028f4ef51587e183/cf:q/1733181122220/Put/seqid=0 2024-12-02T23:12:02,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 is 71, key is 1583cd634301293543c08d9c26886d65/cf:q/1733181122222/Put/seqid=0 2024-12-02T23:12:02,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741977_1153 (size=5241) 2024-12-02T23:12:02,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741977_1153 (size=5241) 2024-12-02T23:12:02,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741977_1153 (size=5241) 2024-12-02T23:12:02,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:02,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741978_1154 (size=8031) 2024-12-02T23:12:02,482 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741978_1154 (size=8031) 2024-12-02T23:12:02,484 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:02,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741978_1154 (size=8031) 2024-12-02T23:12:02,488 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,489 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/.tmp/cf/909aa0b9bfc7465c9ad3e4ce01b84247, store: [table=testtb-testExportWithResetTtl family=cf region=876889227da0811eedb8889a1116e0f3] 2024-12-02T23:12:02,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/.tmp/cf/31cf6eb94c594658816e1639ed0a10fe, store: [table=testtb-testExportWithResetTtl family=cf region=ace763b190e7e1c4038a215dd6005521] 2024-12-02T23:12:02,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/.tmp/cf/909aa0b9bfc7465c9ad3e4ce01b84247 is 206, key is 06912c5346700d77b782f6e9ba41f629c/cf:q/1733181122220/Put/seqid=0 2024-12-02T23:12:02,490 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/.tmp/cf/31cf6eb94c594658816e1639ed0a10fe is 206, key is 1cadba8440a9ec1fd04ab4ad3905e2a4c/cf:q/1733181122222/Put/seqid=0 2024-12-02T23:12:02,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741979_1155 (size=6308) 2024-12-02T23:12:02,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741980_1156 (size=14449) 2024-12-02T23:12:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741980_1156 (size=14449) 2024-12-02T23:12:02,501 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/.tmp/cf/909aa0b9bfc7465c9ad3e4ce01b84247 2024-12-02T23:12:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741979_1155 (size=6308) 2024-12-02T23:12:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741980_1156 (size=14449) 2024-12-02T23:12:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741979_1155 (size=6308) 2024-12-02T23:12:02,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/.tmp/cf/909aa0b9bfc7465c9ad3e4ce01b84247 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247 2024-12-02T23:12:02,508 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-02T23:12:02,512 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247, entries=5, sequenceid=6, filesize=6.2 K 2024-12-02T23:12:02,513 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 876889227da0811eedb8889a1116e0f3 in 68ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 876889227da0811eedb8889a1116e0f3: 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. for snaptb0-testExportWithResetTtl completed. 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247] hfiles 2024-12-02T23:12:02,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247 for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741981_1157 (size=107) 2024-12-02T23:12:02,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741981_1157 (size=107) 2024-12-02T23:12:02,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741981_1157 (size=107) 2024-12-02T23:12:02,523 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
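The entries above trace the region-level half of a FLUSH snapshot for region 876889227da0811eedb8889a1116e0f3: the memstore is flushed (MOB cells land in files under mobdir/, the remaining cells in a regular hfile that is committed out of .tmp), and SnapshotManifest then records a reference to the committed file. Those flushes are initiated by the snapshot procedure itself; an explicit flush of the same table can also be requested from a client, as in this minimal sketch (the class name and the configuration source are assumptions, not taken from this test run):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath that points at the cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush every memstore of the table; the snapshot procedure above performs the
      // equivalent per region before it writes the manifest references.
      admin.flush(TableName.valueOf("testtb-testExportWithResetTtl"));
    }
  }
}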
2024-12-02T23:12:02,524 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-02T23:12:02,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-02T23:12:02,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 876889227da0811eedb8889a1116e0f3 in 236 msec 2024-12-02T23:12:02,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T23:12:02,901 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/.tmp/cf/31cf6eb94c594658816e1639ed0a10fe 2024-12-02T23:12:02,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T23:12:02,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/.tmp/cf/31cf6eb94c594658816e1639ed0a10fe as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe 2024-12-02T23:12:02,914 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe, entries=45, sequenceid=6, filesize=14.1 K 2024-12-02T23:12:02,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for ace763b190e7e1c4038a215dd6005521 in 471ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for ace763b190e7e1c4038a215dd6005521: 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. for snaptb0-testExportWithResetTtl completed. 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe] hfiles 2024-12-02T23:12:02,915 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741982_1158 (size=107) 2024-12-02T23:12:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741982_1158 (size=107) 2024-12-02T23:12:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741982_1158 (size=107) 2024-12-02T23:12:02,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 
2024-12-02T23:12:02,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-02T23:12:02,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-02T23:12:02,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,923 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-02T23:12:02,926 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ace763b190e7e1c4038a215dd6005521 in 634 msec 2024-12-02T23:12:02,926 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:02,927 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:02,928 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
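Both region-level children (pid=72 and pid=73) are now finished, and the parent SnapshotProcedure pid=71 walks its remaining states in the entries that follow: split regions, MOB region, consolidate, verify, complete, post-operation. The whole sequence is driven by a single client call; a minimal sketch of that call using the public Admin API (only the table and snapshot names come from this log, everything else is assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching "type=FLUSH" in the procedure entries above.
      // The master runs SnapshotProcedure / SnapshotRegionProcedure on its side; this
      // call blocks until the snapshot is complete.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH));
    }
  }
}

Because the call is synchronous, the caller only returns once pid=71 reaches SUCCESS, which is consistent with the "Operation: SNAPSHOT ... completed" line appearing only after the procedure finishes below.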
2024-12-02T23:12:02,928 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:02,928 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:02,930 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3] hfiles 2024-12-02T23:12:02,930 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:02,930 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:02,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741983_1159 (size=291) 2024-12-02T23:12:02,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741983_1159 (size=291) 2024-12-02T23:12:02,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741983_1159 (size=291) 2024-12-02T23:12:02,942 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:02,942 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,943 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741984_1160 (size=951) 2024-12-02T23:12:02,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741984_1160 (size=951) 2024-12-02T23:12:02,954 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741984_1160 (size=951) 2024-12-02T23:12:02,959 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:02,971 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:02,971 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-02T23:12:02,973 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:02,973 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T23:12:02,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 700 msec 2024-12-02T23:12:02,981 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000005/launch_container.sh] 2024-12-02T23:12:02,981 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000005/container_tokens] 2024-12-02T23:12:02,982 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000005/sysfs] 2024-12-02T23:12:03,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T23:12:03,412 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T23:12:03,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:12:03,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:03,417 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:12:03,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-02T23:12:03,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T23:12:03,418 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:12:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741985_1161 (size=433) 2024-12-02T23:12:03,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741985_1161 (size=433) 2024-12-02T23:12:03,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741985_1161 (size=433) 2024-12-02T23:12:03,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T23:12:03,583 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
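The 23:12:03,414 entry shows the client asking the master to create a second table, testExportWithResetTtl, with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1'); pid=74 is the resulting CreateTableProcedure. A sketch of building an equivalent descriptor with the public client API follows; the split key '1' is inferred from the two regions created below, and all other settings are left at their defaults rather than spelled out as in the log line:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build());
      // Pre-split at '1' so the table starts with two regions, as seen in the log.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}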
2024-12-02T23:12:03,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T23:12:03,844 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8bc51cc2e95ea6782b0a19d49e8a7b9a, NAME => 'testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:03,845 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b21f32e3497ff4e9c22800eb97703114, NAME => 'testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:03,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741986_1162 (size=58) 2024-12-02T23:12:03,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741986_1162 (size=58) 2024-12-02T23:12:03,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741986_1162 (size=58) 2024-12-02T23:12:03,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741987_1163 (size=58) 2024-12-02T23:12:03,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741987_1163 (size=58) 2024-12-02T23:12:03,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741987_1163 (size=58) 2024-12-02T23:12:03,866 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:03,866 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing b21f32e3497ff4e9c22800eb97703114, disabling compactions & flushes 2024-12-02T23:12:03,867 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 
{}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. after waiting 0 ms 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:03,867 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for b21f32e3497ff4e9c22800eb97703114: Waiting for close lock at 1733181123866Disabling compacts and flushes for region at 1733181123866Disabling writes for close at 1733181123867 (+1 ms)Writing region close event to WAL at 1733181123867Closed at 1733181123867 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 8bc51cc2e95ea6782b0a19d49e8a7b9a, disabling compactions & flushes 2024-12-02T23:12:03,867 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. after waiting 0 ms 2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:03,867 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 
2024-12-02T23:12:03,867 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8bc51cc2e95ea6782b0a19d49e8a7b9a: Waiting for close lock at 1733181123867Disabling compacts and flushes for region at 1733181123867Disabling writes for close at 1733181123867Writing region close event to WAL at 1733181123867Closed at 1733181123867 2024-12-02T23:12:03,870 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:12:03,871 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733181123870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181123870"}]},"ts":"1733181123870"} 2024-12-02T23:12:03,871 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733181123870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181123870"}]},"ts":"1733181123870"} 2024-12-02T23:12:03,874 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:12:03,876 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:12:03,876 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181123876"}]},"ts":"1733181123876"} 2024-12-02T23:12:03,878 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T23:12:03,878 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:12:03,880 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:12:03,880 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:12:03,880 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:12:03,880 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:12:03,880 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=b21f32e3497ff4e9c22800eb97703114, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, ASSIGN}] 2024-12-02T23:12:03,881 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, ASSIGN 2024-12-02T23:12:03,881 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, ASSIGN 2024-12-02T23:12:03,882 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:12:03,882 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:12:04,033 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T23:12:04,033 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=8bc51cc2e95ea6782b0a19d49e8a7b9a, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:04,033 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=b21f32e3497ff4e9c22800eb97703114, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:04,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, ASSIGN because future has completed 2024-12-02T23:12:04,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:12:04,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, ASSIGN because future has completed 2024-12-02T23:12:04,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:04,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=74 2024-12-02T23:12:04,191 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:04,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => b21f32e3497ff4e9c22800eb97703114, NAME => 'testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:12:04,192 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:04,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. service=AccessControlService 2024-12-02T23:12:04,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 8bc51cc2e95ea6782b0a19d49e8a7b9a, NAME => 'testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:12:04,192 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:04,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. service=AccessControlService 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,193 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
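Each region open above registers the AccessControlService coprocessor (org.apache.hadoop.hbase.security.access.AccessController), which is what later persists the table grant seen at 23:12:04,217 below ("Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA") and fans it out through ZooKeeper. A grant of the same shape could be issued from a client with AccessControlClient; this is only a sketch, assuming the ACL coprocessor is enabled cluster-wide, and only the user name "jenkins" comes from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant RWXCA (read, write, exec, create, admin) on the whole table to user "jenkins",
      // matching the permission row written in the CREATE_TABLE_POST_OPERATION step below.
      AccessControlClient.grant(conn, TableName.valueOf("testExportWithResetTtl"), "jenkins",
          null /* family: all */, null /* qualifier: all */,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}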
2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,194 INFO [StoreOpener-b21f32e3497ff4e9c22800eb97703114-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,194 INFO [StoreOpener-8bc51cc2e95ea6782b0a19d49e8a7b9a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,196 INFO [StoreOpener-8bc51cc2e95ea6782b0a19d49e8a7b9a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8bc51cc2e95ea6782b0a19d49e8a7b9a columnFamilyName cf 2024-12-02T23:12:04,196 INFO [StoreOpener-b21f32e3497ff4e9c22800eb97703114-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b21f32e3497ff4e9c22800eb97703114 columnFamilyName cf 2024-12-02T23:12:04,197 DEBUG [StoreOpener-8bc51cc2e95ea6782b0a19d49e8a7b9a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:04,197 DEBUG [StoreOpener-b21f32e3497ff4e9c22800eb97703114-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:04,197 INFO [StoreOpener-8bc51cc2e95ea6782b0a19d49e8a7b9a-1 {}] regionserver.HStore(327): Store=8bc51cc2e95ea6782b0a19d49e8a7b9a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:04,197 INFO [StoreOpener-b21f32e3497ff4e9c22800eb97703114-1 {}] regionserver.HStore(327): Store=b21f32e3497ff4e9c22800eb97703114/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:04,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,199 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,200 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,201 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:04,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:04,203 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 8bc51cc2e95ea6782b0a19d49e8a7b9a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68552033, jitterRate=0.021504893898963928}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:04,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,203 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened b21f32e3497ff4e9c22800eb97703114; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69691495, jitterRate=0.038484200835227966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:04,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,204 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 8bc51cc2e95ea6782b0a19d49e8a7b9a: Running coprocessor pre-open hook at 1733181124193Writing region info on filesystem at 1733181124193Initializing all the Stores at 1733181124194 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181124194Cleaning up temporary data from old regions at 1733181124199 (+5 ms)Running coprocessor post-open hooks at 1733181124203 (+4 ms)Region opened successfully at 1733181124204 (+1 ms) 2024-12-02T23:12:04,204 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for b21f32e3497ff4e9c22800eb97703114: Running coprocessor pre-open hook at 1733181124193Writing region info on filesystem at 1733181124193Initializing all the Stores at 1733181124194 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181124194Cleaning up temporary data from old regions at 1733181124199 (+5 ms)Running coprocessor post-open hooks at 1733181124203 (+4 ms)Region opened successfully at 1733181124204 (+1 ms) 2024-12-02T23:12:04,204 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114., pid=77, masterSystemTime=1733181124188 2024-12-02T23:12:04,204 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a., pid=78, masterSystemTime=1733181124189 2024-12-02T23:12:04,206 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:04,206 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:04,207 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=b21f32e3497ff4e9c22800eb97703114, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:04,207 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:04,207 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 
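At this point both regions of testExportWithResetTtl are open: b21f32e3497ff4e9c22800eb97703114 on 9e5ab3b623d7,39847 and 8bc51cc2e95ea6782b0a19d49e8a7b9a on 9e5ab3b623d7,38049. The "Found 2 regions for table" checks further down come from the test harness scanning hbase:meta; an ordinary client can verify the same assignment through RegionLocator, as in this sketch (not the harness's own code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionLocations {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testExportWithResetTtl"))) {
      // Each entry pairs a region (start/end key) with the server currently hosting it.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}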
2024-12-02T23:12:04,208 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=8bc51cc2e95ea6782b0a19d49e8a7b9a, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:04,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:12:04,209 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:04,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-12-02T23:12:04,211 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622 in 173 msec 2024-12-02T23:12:04,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-12-02T23:12:04,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779 in 173 msec 2024-12-02T23:12:04,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, ASSIGN in 331 msec 2024-12-02T23:12:04,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-02T23:12:04,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, ASSIGN in 332 msec 2024-12-02T23:12:04,215 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:12:04,215 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181124215"}]},"ts":"1733181124215"} 2024-12-02T23:12:04,216 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T23:12:04,217 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:12:04,217 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-02T23:12:04,220 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T23:12:04,222 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:04,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:04,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:04,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:04,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,227 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,228 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:04,228 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 812 msec 2024-12-02T23:12:04,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=74 2024-12-02T23:12:04,552 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-02T23:12:04,553 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-02T23:12:04,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:04,556 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:04,558 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,562 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,567 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:04,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:04,579 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-02T23:12:04,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 
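The two regionserver.HRegion(8528) warnings above mean the test wrote its rows with the write-ahead log disabled, so an unflushed region would lose them on a crash; the test tolerates that because the data is flushed by the snapshot taken immediately afterwards. Durability is chosen per mutation on the client side; a minimal sketch (row, qualifier and value are placeholders, not data from this run):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
          .setDurability(Durability.SKIP_WAL); // triggers the "WAL disabled" warning above
      table.put(put);
    }
  }
}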
2024-12-02T23:12:04,582 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:04,583 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,587 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,592 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T23:12:04,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181124595 (current time:1733181124595). 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20e4fa3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:04,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:04,596 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:04,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:04,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:04,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bccc783, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:04,597 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:04,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,598 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49336, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:04,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71db6bd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:04,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:04,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:04,601 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39942, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:04,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,602 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a94135d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:04,604 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:04,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:04,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:04,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6b16b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:04,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:04,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,605 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:04,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e949140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:04,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:04,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:04,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:04,608 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:04,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:04,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:04,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43578, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:04,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:04,612 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:04,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T23:12:04,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:12:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T23:12:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T23:12:04,615 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:04,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T23:12:04,616 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:04,618 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:04,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741988_1164 (size=143) 2024-12-02T23:12:04,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741988_1164 (size=143) 2024-12-02T23:12:04,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741988_1164 (size=143) 2024-12-02T23:12:04,628 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:04,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b21f32e3497ff4e9c22800eb97703114}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a}] 2024-12-02T23:12:04,629 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,629 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-12-02T23:12:04,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-02T23:12:04,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-02T23:12:04,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:04,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:04,781 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 8bc51cc2e95ea6782b0a19d49e8a7b9a 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T23:12:04,781 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing b21f32e3497ff4e9c22800eb97703114 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T23:12:04,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 is 71, key is 0174d8ec1df8f6f442162a4a1fd73489/cf:q/1733181124574/Put/seqid=0 2024-12-02T23:12:04,808 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a is 71, key is 11082877d3232402cc2cda449d679047/cf:q/1733181124577/Put/seqid=0 2024-12-02T23:12:04,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741989_1165 (size=5171) 2024-12-02T23:12:04,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741989_1165 (size=5171) 2024-12-02T23:12:04,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741989_1165 (size=5171) 2024-12-02T23:12:04,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:04,831 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,832 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/.tmp/cf/f2f842dea7e5495eb0d912702496070d, store: [table=testExportWithResetTtl family=cf region=b21f32e3497ff4e9c22800eb97703114] 2024-12-02T23:12:04,833 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/.tmp/cf/f2f842dea7e5495eb0d912702496070d is 199, key is 021cfce24c77b7ce1ee9bda8841a0bce3/cf:q/1733181124574/Put/seqid=0 2024-12-02T23:12:04,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741990_1166 (size=8101) 2024-12-02T23:12:04,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741990_1166 (size=8101) 2024-12-02T23:12:04,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741990_1166 (size=8101) 2024-12-02T23:12:04,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:04,845 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/.tmp/cf/03297db76ca245a7b94f73ee7985b979, store: [table=testExportWithResetTtl family=cf region=8bc51cc2e95ea6782b0a19d49e8a7b9a] 2024-12-02T23:12:04,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/.tmp/cf/03297db76ca245a7b94f73ee7985b979 is 199, key is 1daa404165380531f3a5995b2202b3fa7/cf:q/1733181124577/Put/seqid=0 2024-12-02T23:12:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741991_1167 (size=6071) 2024-12-02T23:12:04,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741991_1167 (size=6071) 2024-12-02T23:12:04,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741991_1167 (size=6071) 2024-12-02T23:12:04,871 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/.tmp/cf/f2f842dea7e5495eb0d912702496070d 2024-12-02T23:12:04,880 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/.tmp/cf/f2f842dea7e5495eb0d912702496070d as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d 2024-12-02T23:12:04,887 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d, entries=4, sequenceid=5, filesize=5.9 K 2024-12-02T23:12:04,888 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for b21f32e3497ff4e9c22800eb97703114 in 107ms, sequenceid=5, compaction requested=false 2024-12-02T23:12:04,888 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-02T23:12:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for b21f32e3497ff4e9c22800eb97703114: 2024-12-02T23:12:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. for snaptb-testExportWithResetTtl completed. 2024-12-02T23:12:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T23:12:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:04,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d] hfiles 2024-12-02T23:12:04,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d for snapshot=snaptb-testExportWithResetTtl 2024-12-02T23:12:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741992_1168 (size=14322) 2024-12-02T23:12:04,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741992_1168 (size=14322) 2024-12-02T23:12:04,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741992_1168 (size=14322) 2024-12-02T23:12:04,897 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/.tmp/cf/03297db76ca245a7b94f73ee7985b979 2024-12-02T23:12:04,905 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/.tmp/cf/03297db76ca245a7b94f73ee7985b979 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979 2024-12-02T23:12:04,915 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979, entries=46, sequenceid=5, filesize=14.0 K 2024-12-02T23:12:04,916 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 8bc51cc2e95ea6782b0a19d49e8a7b9a in 135ms, sequenceid=5, compaction requested=false 2024-12-02T23:12:04,916 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 8bc51cc2e95ea6782b0a19d49e8a7b9a: 2024-12-02T23:12:04,916 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. for snaptb-testExportWithResetTtl completed. 2024-12-02T23:12:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T23:12:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979] hfiles 2024-12-02T23:12:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979 for snapshot=snaptb-testExportWithResetTtl 2024-12-02T23:12:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741993_1169 (size=100) 2024-12-02T23:12:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741993_1169 (size=100) 2024-12-02T23:12:04,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741993_1169 (size=100) 2024-12-02T23:12:04,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 
2024-12-02T23:12:04,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-02T23:12:04,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-02T23:12:04,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,926 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b21f32e3497ff4e9c22800eb97703114 in 299 msec 2024-12-02T23:12:04,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741994_1170 (size=100) 2024-12-02T23:12:04,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741994_1170 (size=100) 2024-12-02T23:12:04,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741994_1170 (size=100) 2024-12-02T23:12:04,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 
2024-12-02T23:12:04,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-02T23:12:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-02T23:12:04,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T23:12:04,932 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-12-02T23:12:04,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a in 305 msec 2024-12-02T23:12:04,935 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:04,935 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:04,936 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:12:04,937 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:04,937 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:04,938 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114] hfiles 2024-12-02T23:12:04,938 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:04,939 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:04,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741995_1171 (size=284) 2024-12-02T23:12:04,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741995_1171 (size=284) 2024-12-02T23:12:04,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741995_1171 (size=284) 2024-12-02T23:12:04,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:04,946 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-02T23:12:04,947 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T23:12:04,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741996_1172 (size=923) 2024-12-02T23:12:04,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741996_1172 (size=923) 2024-12-02T23:12:04,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39833 is added to blk_1073741996_1172 (size=923) 2024-12-02T23:12:04,963 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:04,976 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:04,977 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T23:12:04,978 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:04,978 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T23:12:04,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 365 msec 2024-12-02T23:12:05,177 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0002_000001 (auth:SIMPLE) from 127.0.0.1:43050 2024-12-02T23:12:05,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000001/launch_container.sh] 2024-12-02T23:12:05,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000001/container_tokens] 2024-12-02T23:12:05,187 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0002/container_1733180955226_0002_01_000001/sysfs] 2024-12-02T23:12:05,202 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T23:12:05,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T23:12:05,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T23:12:05,203 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T23:12:05,204 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T23:12:05,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T23:12:05,242 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-02T23:12:05,251 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251 2024-12-02T23:12:05,251 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:05,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:05,283 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T23:12:05,284 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-02T23:12:05,291 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T23:12:05,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741997_1173 (size=143) 2024-12-02T23:12:05,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741997_1173 (size=143) 2024-12-02T23:12:05,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741997_1173 (size=143) 2024-12-02T23:12:05,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741998_1174 (size=923) 2024-12-02T23:12:05,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741998_1174 (size=923) 2024-12-02T23:12:05,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741998_1174 (size=923) 2024-12-02T23:12:05,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741999_1175 (size=141) 2024-12-02T23:12:05,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741999_1175 (size=141) 2024-12-02T23:12:05,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741999_1175 (size=141) 2024-12-02T23:12:05,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:05,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:05,331 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,373 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:12:06,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-13687792298966369381.jar 2024-12-02T23:12:06,528 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,529 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-3895393300352109631.jar 2024-12-02T23:12:06,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:06,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:12:06,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:12:06,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:12:06,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:12:06,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:12:06,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:12:06,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:12:06,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:12:06,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:12:06,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:12:06,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:12:06,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:06,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:06,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:06,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:06,603 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:06,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:06,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:06,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742000_1176 (size=131440) 2024-12-02T23:12:06,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742000_1176 (size=131440) 2024-12-02T23:12:06,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742000_1176 (size=131440) 2024-12-02T23:12:06,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T23:12:06,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T23:12:06,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742001_1177 (size=4188619) 2024-12-02T23:12:06,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T23:12:06,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T23:12:06,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T23:12:06,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742003_1179 (size=903926) 2024-12-02T23:12:06,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742003_1179 (size=903926) 2024-12-02T23:12:06,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742003_1179 (size=903926) 2024-12-02T23:12:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742004_1180 (size=8360083) 2024-12-02T23:12:06,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742004_1180 (size=8360083) 2024-12-02T23:12:06,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to 
blk_1073742004_1180 (size=8360083) 2024-12-02T23:12:06,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742005_1181 (size=443172) 2024-12-02T23:12:06,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742005_1181 (size=443172) 2024-12-02T23:12:06,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742005_1181 (size=443172) 2024-12-02T23:12:06,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T23:12:06,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T23:12:06,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742006_1182 (size=1877034) 2024-12-02T23:12:06,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742007_1183 (size=77835) 2024-12-02T23:12:06,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742007_1183 (size=77835) 2024-12-02T23:12:06,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742007_1183 (size=77835) 2024-12-02T23:12:06,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742008_1184 (size=30949) 2024-12-02T23:12:06,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742008_1184 (size=30949) 2024-12-02T23:12:06,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742008_1184 (size=30949) 2024-12-02T23:12:06,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T23:12:06,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T23:12:06,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742009_1185 (size=1597213) 2024-12-02T23:12:06,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742010_1186 (size=6424743) 2024-12-02T23:12:06,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742010_1186 (size=6424743) 2024-12-02T23:12:06,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742010_1186 (size=6424743) 2024-12-02T23:12:07,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742011_1187 (size=4695811) 2024-12-02T23:12:07,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 
is added to blk_1073742011_1187 (size=4695811) 2024-12-02T23:12:07,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742011_1187 (size=4695811) 2024-12-02T23:12:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742012_1188 (size=232957) 2024-12-02T23:12:07,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742012_1188 (size=232957) 2024-12-02T23:12:07,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742012_1188 (size=232957) 2024-12-02T23:12:07,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742013_1189 (size=127628) 2024-12-02T23:12:07,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742013_1189 (size=127628) 2024-12-02T23:12:07,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742013_1189 (size=127628) 2024-12-02T23:12:07,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742014_1190 (size=20406) 2024-12-02T23:12:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742014_1190 (size=20406) 2024-12-02T23:12:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742014_1190 (size=20406) 2024-12-02T23:12:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T23:12:07,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T23:12:07,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742015_1191 (size=5175431) 2024-12-02T23:12:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742016_1192 (size=217634) 2024-12-02T23:12:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742016_1192 (size=217634) 2024-12-02T23:12:07,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742016_1192 (size=217634) 2024-12-02T23:12:07,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T23:12:07,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T23:12:07,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742017_1193 (size=1832290) 2024-12-02T23:12:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39833 is added to blk_1073742018_1194 (size=322274) 2024-12-02T23:12:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742018_1194 (size=322274) 2024-12-02T23:12:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742018_1194 (size=322274) 2024-12-02T23:12:07,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742019_1195 (size=503880) 2024-12-02T23:12:07,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742019_1195 (size=503880) 2024-12-02T23:12:07,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742019_1195 (size=503880) 2024-12-02T23:12:07,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742020_1196 (size=29229) 2024-12-02T23:12:07,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742020_1196 (size=29229) 2024-12-02T23:12:07,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742020_1196 (size=29229) 2024-12-02T23:12:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742021_1197 (size=24096) 2024-12-02T23:12:07,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742021_1197 (size=24096) 2024-12-02T23:12:07,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742021_1197 (size=24096) 2024-12-02T23:12:07,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742022_1198 (size=111872) 2024-12-02T23:12:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742022_1198 (size=111872) 2024-12-02T23:12:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742022_1198 (size=111872) 2024-12-02T23:12:07,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742023_1199 (size=45609) 2024-12-02T23:12:07,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742023_1199 (size=45609) 2024-12-02T23:12:07,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742023_1199 (size=45609) 2024-12-02T23:12:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742024_1200 (size=136454) 2024-12-02T23:12:07,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742024_1200 (size=136454) 2024-12-02T23:12:07,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37623 is added to blk_1073742024_1200 (size=136454) 2024-12-02T23:12:07,512 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T23:12:07,514 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-02T23:12:07,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.0 K 2024-12-02T23:12:07,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-02T23:12:07,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.9 K 2024-12-02T23:12:07,516 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:12:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742025_1201 (size=995) 2024-12-02T23:12:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742025_1201 (size=995) 2024-12-02T23:12:07,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742025_1201 (size=995) 2024-12-02T23:12:07,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742026_1202 (size=35) 2024-12-02T23:12:07,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742026_1202 (size=35) 2024-12-02T23:12:07,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742026_1202 (size=35) 2024-12-02T23:12:07,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742027_1203 (size=303992) 2024-12-02T23:12:07,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742027_1203 (size=303992) 2024-12-02T23:12:07,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742027_1203 (size=303992) 2024-12-02T23:12:07,577 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:12:07,577 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:12:07,933 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:39932 2024-12-02T23:12:09,424 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8bc51cc2e95ea6782b0a19d49e8a7b9a changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:12:09,424 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ace763b190e7e1c4038a215dd6005521 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:12:09,424 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b21f32e3497ff4e9c22800eb97703114 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:12:09,424 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 876889227da0811eedb8889a1116e0f3 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:12:14,325 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:47568 2024-12-02T23:12:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742028_1204 (size=349690) 2024-12-02T23:12:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742028_1204 (size=349690) 2024-12-02T23:12:14,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742028_1204 (size=349690) 2024-12-02T23:12:16,747 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:52532 2024-12-02T23:12:16,749 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:36390 2024-12-02T23:12:17,422 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:52548 2024-12-02T23:12:17,424 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:36402 2024-12-02T23:12:20,181 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0003_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:12:24,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742029_1205 (size=14322) 2024-12-02T23:12:24,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742029_1205 (size=14322) 2024-12-02T23:12:24,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742029_1205 (size=14322) 2024-12-02T23:12:24,994 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000002/launch_container.sh] 2024-12-02T23:12:24,995 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000002/container_tokens] 2024-12-02T23:12:24,995 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000002/sysfs] 2024-12-02T23:12:25,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742031_1207 (size=6071) 2024-12-02T23:12:25,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742031_1207 (size=6071) 2024-12-02T23:12:25,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742031_1207 (size=6071) 2024-12-02T23:12:25,604 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000004/launch_container.sh] 2024-12-02T23:12:25,604 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000004/container_tokens] 2024-12-02T23:12:25,605 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000004/sysfs] 2024-12-02T23:12:26,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742032_1208 (size=8101) 2024-12-02T23:12:26,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742032_1208 (size=8101) 2024-12-02T23:12:26,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742032_1208 (size=8101) 2024-12-02T23:12:26,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39833 is added to blk_1073742033_1209 (size=5171) 2024-12-02T23:12:26,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742033_1209 (size=5171) 2024-12-02T23:12:26,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742033_1209 (size=5171) 2024-12-02T23:12:26,979 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000003/launch_container.sh] 2024-12-02T23:12:26,979 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000003/container_tokens] 2024-12-02T23:12:26,979 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000003/sysfs] 2024-12-02T23:12:27,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742030_1206 (size=31706) 2024-12-02T23:12:27,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742030_1206 (size=31706) 2024-12-02T23:12:27,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742030_1206 (size=31706) 2024-12-02T23:12:27,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742034_1210 (size=462) 2024-12-02T23:12:27,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742034_1210 (size=462) 2024-12-02T23:12:27,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742034_1210 (size=462) 2024-12-02T23:12:27,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742035_1211 (size=31706) 2024-12-02T23:12:27,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742035_1211 (size=31706) 2024-12-02T23:12:27,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742035_1211 (size=31706) 2024-12-02T23:12:27,082 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000005/launch_container.sh] 2024-12-02T23:12:27,082 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000005/container_tokens] 2024-12-02T23:12:27,082 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000005/sysfs] 2024-12-02T23:12:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742036_1212 (size=349690) 2024-12-02T23:12:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742036_1212 (size=349690) 2024-12-02T23:12:27,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742036_1212 (size=349690) 2024-12-02T23:12:27,126 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:34270 2024-12-02T23:12:27,132 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:34274 2024-12-02T23:12:28,779 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:12:28,782 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
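The entries above show the export job for 'snaptb-testExportWithResetTtl' being assembled and run: TableMapReduceUtil pins each dependency class to a jar on the job classpath, the DataNodes acknowledge the uploaded job resources block by block, ExportSnapshot splits the hfile list into four groups (14.0 K / 7.9 K / 5.9 K / 5.0 K), and the YARN containers copy the files before the test finalizes and verifies the export. As a rough illustration only, a minimal sketch of driving the same tool programmatically follows; the snapshot name and destination root are taken from the log, while the Tool wiring and flag spelling follow the documented ExportSnapshot command-line usage and are assumptions, not a transcript of the test's own code. The test name suggests the export also resets the snapshot TTL; that option is not visible in this excerpt, so it is left out of the sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Hedged sketch: run ExportSnapshot as a Hadoop Tool, roughly what the test
// harness drives under the hood. Snapshot name and destination come from the
// log; everything else is illustrative.
public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb-testExportWithResetTtl",
        "--copy-to", "hdfs://localhost:33369/user/jenkins/test-data/"
            + "962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251",
        "--mappers", "4"   // the log shows the export split into 4 groups
    });
    System.exit(rc);
  }
}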
2024-12-02T23:12:28,797 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-02T23:12:28,797 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:12:28,798 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:12:28,798 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T23:12:28,799 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T23:12:28,799 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T23:12:28,799 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T23:12:28,799 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T23:12:28,799 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181125251/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T23:12:28,807 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testExportWithResetTtl 2024-12-02T23:12:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:28,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T23:12:28,810 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181148810"}]},"ts":"1733181148810"} 2024-12-02T23:12:28,812 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-02T23:12:28,812 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-02T23:12:28,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-02T23:12:28,815 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, UNASSIGN}] 2024-12-02T23:12:28,816 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, UNASSIGN 2024-12-02T23:12:28,816 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, UNASSIGN 2024-12-02T23:12:28,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=8bc51cc2e95ea6782b0a19d49e8a7b9a, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:28,817 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=b21f32e3497ff4e9c22800eb97703114, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:28,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, UNASSIGN because future has completed 2024-12-02T23:12:28,819 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:28,819 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:28,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, UNASSIGN because future has completed 2024-12-02T23:12:28,820 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:28,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:12:28,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T23:12:28,973 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:28,973 DEBUG 
[RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:28,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 8bc51cc2e95ea6782b0a19d49e8a7b9a, disabling compactions & flushes 2024-12-02T23:12:28,973 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:28,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:28,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. after waiting 0 ms 2024-12-02T23:12:28,973 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:28,974 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:28,974 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:28,974 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing b21f32e3497ff4e9c22800eb97703114, disabling compactions & flushes 2024-12-02T23:12:28,974 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:28,974 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:28,974 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. after waiting 0 ms 2024-12-02T23:12:28,974 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 
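At this point both regions of testExportWithResetTtl (8bc51cc2e95ea6782b0a19d49e8a7b9a and b21f32e3497ff4e9c22800eb97703114) are being closed by the RS_CLOSE_REGION handlers as subprocedures of the DisableTableProcedure stored earlier as pid=82. From the client side, the whole pid=82..87 cascade is triggered by a single admin call; a minimal sketch, assuming the standard HBase client API rather than the test's own helpers:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch of the client call behind the DisableTableProcedure ->
// CloseTableRegionsProcedure -> TransitRegionStateProcedure ->
// CloseRegionProcedure chain logged above (pids 82..87).
public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportWithResetTtl");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // blocks until the master reports the procedure finished
      }
    }
  }
}

The synchronous disableTable call is what produces the repeated "Checking to see if procedure is done pid=82" polling visible in the MasterRpcServices entries.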
2024-12-02T23:12:28,979 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:12:28,979 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:12:28,980 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:28,980 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:28,980 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a. 2024-12-02T23:12:28,980 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114. 2024-12-02T23:12:28,980 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for b21f32e3497ff4e9c22800eb97703114: Waiting for close lock at 1733181148974Running coprocessor pre-close hooks at 1733181148974Disabling compacts and flushes for region at 1733181148974Disabling writes for close at 1733181148974Writing region close event to WAL at 1733181148975 (+1 ms)Running coprocessor post-close hooks at 1733181148980 (+5 ms)Closed at 1733181148980 2024-12-02T23:12:28,980 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 8bc51cc2e95ea6782b0a19d49e8a7b9a: Waiting for close lock at 1733181148973Running coprocessor pre-close hooks at 1733181148973Disabling compacts and flushes for region at 1733181148973Disabling writes for close at 1733181148973Writing region close event to WAL at 1733181148974 (+1 ms)Running coprocessor post-close hooks at 1733181148980 (+6 ms)Closed at 1733181148980 2024-12-02T23:12:28,983 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:28,984 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=8bc51cc2e95ea6782b0a19d49e8a7b9a, regionState=CLOSED 2024-12-02T23:12:28,984 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:28,986 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=b21f32e3497ff4e9c22800eb97703114, regionState=CLOSED 2024-12-02T23:12:28,987 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, 
ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:28,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:12:28,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-02T23:12:28,990 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 8bc51cc2e95ea6782b0a19d49e8a7b9a, server=9e5ab3b623d7,38049,1733180945779 in 169 msec 2024-12-02T23:12:28,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-02T23:12:28,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure b21f32e3497ff4e9c22800eb97703114, server=9e5ab3b623d7,39847,1733180945622 in 169 msec 2024-12-02T23:12:28,992 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=8bc51cc2e95ea6782b0a19d49e8a7b9a, UNASSIGN in 176 msec 2024-12-02T23:12:28,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-12-02T23:12:28,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b21f32e3497ff4e9c22800eb97703114, UNASSIGN in 177 msec 2024-12-02T23:12:28,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-02T23:12:28,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 181 msec 2024-12-02T23:12:28,997 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181148997"}]},"ts":"1733181148997"} 2024-12-02T23:12:28,999 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T23:12:28,999 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-02T23:12:29,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 193 msec 2024-12-02T23:12:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T23:12:29,123 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-02T23:12:29,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testExportWithResetTtl 2024-12-02T23:12:29,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,126 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-02T23:12:29,126 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,131 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-02T23:12:29,131 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:29,132 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:29,134 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/recovered.edits] 2024-12-02T23:12:29,135 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/recovered.edits] 2024-12-02T23:12:29,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,136 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,137 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,138 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-02T23:12:29,138 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:12:29,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T23:12:29,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-02T23:12:29,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:29,141 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:29,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:29,142 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:29,144 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d 2024-12-02T23:12:29,144 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/cf/03297db76ca245a7b94f73ee7985b979 2024-12-02T23:12:29,150 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/recovered.edits/8.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/recovered.edits/8.seqid 2024-12-02T23:12:29,150 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/recovered.edits/8.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a/recovered.edits/8.seqid 2024-12-02T23:12:29,150 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:29,150 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:29,151 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-02T23:12:29,151 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-02T23:12:29,152 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-12-02T23:12:29,157 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): 
Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b2024120270da488914d640d1b46f9e974d1299bd_8bc51cc2e95ea6782b0a19d49e8a7b9a 2024-12-02T23:12:29,158 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e202412029ff196b56ee543b0b10ca219e987a657_b21f32e3497ff4e9c22800eb97703114 2024-12-02T23:12:29,159 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-02T23:12:29,161 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,165 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-02T23:12:29,167 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-02T23:12:29,169 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,169 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
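The HFileArchiver entries above show that store files and MOB files are not deleted outright: each file is moved from the table's location under data/ to a mirrored location under archive/data/. As a purely illustrative sketch of that path mapping (the helper name is invented and the real HFileArchiver additionally handles collisions, retries and directory creation), the rewrite could look like this:

import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {
    // Hypothetical helper: map a store-file path under <root>/data/... to the
    // mirrored location under <root>/archive/data/..., as reported in the log.
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String root = rootDir.toString();
        String file = storeFile.toString();
        if (!file.startsWith(root + "/data/")) {
            throw new IllegalArgumentException("not under the data directory: " + storeFile);
        }
        String relative = file.substring((root + "/data/").length());
        return new Path(new Path(rootDir, "archive/data"), relative);
    }

    public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156");
        Path storeFile = new Path(root,
            "data/default/testExportWithResetTtl/b21f32e3497ff4e9c22800eb97703114/cf/f2f842dea7e5495eb0d912702496070d");
        // Prints the same archive/... destination that the HFileArchiver lines above report.
        System.out.println(toArchivePath(root, storeFile));
    }
}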
2024-12-02T23:12:29,169 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181149169"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,169 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181149169"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,171 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:12:29,171 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b21f32e3497ff4e9c22800eb97703114, NAME => 'testExportWithResetTtl,,1733181123414.b21f32e3497ff4e9c22800eb97703114.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8bc51cc2e95ea6782b0a19d49e8a7b9a, NAME => 'testExportWithResetTtl,1,1733181123414.8bc51cc2e95ea6782b0a19d49e8a7b9a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:12:29,171 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-02T23:12:29,172 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181149172"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,174 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-02T23:12:29,174 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T23:12:29,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 52 msec 2024-12-02T23:12:29,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-02T23:12:29,242 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-02T23:12:29,242 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-02T23:12:29,243 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithResetTtl 2024-12-02T23:12:29,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T23:12:29,246 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181149246"}]},"ts":"1733181149246"} 2024-12-02T23:12:29,248 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
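The "Operation: DELETE ... completed" and "disable testtb-testExportWithResetTtl" entries correspond to ordinary client-side Admin calls; the test harness uses its own utility wrappers, so the following is only a minimal sketch of equivalent calls with the standard HBase 2.x client API (configuration and connection details are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
            if (admin.tableExists(table)) {
                if (admin.isTableEnabled(table)) {
                    admin.disableTable(table);   // drives the DisableTableProcedure seen in the log
                }
                admin.deleteTable(table);        // drives the DeleteTableProcedure and HFileArchiver work
            }
        }
    }
}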
2024-12-02T23:12:29,248 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-02T23:12:29,248 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-02T23:12:29,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, UNASSIGN}] 2024-12-02T23:12:29,251 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, UNASSIGN 2024-12-02T23:12:29,251 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, UNASSIGN 2024-12-02T23:12:29,252 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=ace763b190e7e1c4038a215dd6005521, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:29,252 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=876889227da0811eedb8889a1116e0f3, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:12:29,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, UNASSIGN because future has completed 2024-12-02T23:12:29,253 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:29,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:29,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, UNASSIGN because future has completed 2024-12-02T23:12:29,254 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:29,255 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:12:29,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T23:12:29,406 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:29,406 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing ace763b190e7e1c4038a215dd6005521, disabling compactions & flushes 2024-12-02T23:12:29,407 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. after waiting 0 ms 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:29,407 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:29,407 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 876889227da0811eedb8889a1116e0f3, disabling compactions & flushes 2024-12-02T23:12:29,408 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:29,408 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 2024-12-02T23:12:29,408 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. after waiting 0 ms 2024-12-02T23:12:29,408 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
2024-12-02T23:12:29,444 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:29,445 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:29,445 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521. 2024-12-02T23:12:29,445 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for ace763b190e7e1c4038a215dd6005521: Waiting for close lock at 1733181149407Running coprocessor pre-close hooks at 1733181149407Disabling compacts and flushes for region at 1733181149407Disabling writes for close at 1733181149407Writing region close event to WAL at 1733181149419 (+12 ms)Running coprocessor post-close hooks at 1733181149445 (+26 ms)Closed at 1733181149445 2024-12-02T23:12:29,448 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:29,449 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=ace763b190e7e1c4038a215dd6005521, regionState=CLOSED 2024-12-02T23:12:29,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:29,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-02T23:12:29,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure ace763b190e7e1c4038a215dd6005521, server=9e5ab3b623d7,38049,1733180945779 in 198 msec 2024-12-02T23:12:29,458 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=ace763b190e7e1c4038a215dd6005521, UNASSIGN in 206 msec 2024-12-02T23:12:29,463 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:29,464 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:29,464 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3. 
2024-12-02T23:12:29,464 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 876889227da0811eedb8889a1116e0f3: Waiting for close lock at 1733181149407Running coprocessor pre-close hooks at 1733181149407Disabling compacts and flushes for region at 1733181149407Disabling writes for close at 1733181149408 (+1 ms)Writing region close event to WAL at 1733181149420 (+12 ms)Running coprocessor post-close hooks at 1733181149464 (+44 ms)Closed at 1733181149464 2024-12-02T23:12:29,466 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:29,467 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=876889227da0811eedb8889a1116e0f3, regionState=CLOSED 2024-12-02T23:12:29,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:12:29,473 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-02T23:12:29,473 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 876889227da0811eedb8889a1116e0f3, server=9e5ab3b623d7,41311,1733180945828 in 216 msec 2024-12-02T23:12:29,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-12-02T23:12:29,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=876889227da0811eedb8889a1116e0f3, UNASSIGN in 223 msec 2024-12-02T23:12:29,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-02T23:12:29,479 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 228 msec 2024-12-02T23:12:29,481 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181149480"}]},"ts":"1733181149480"} 2024-12-02T23:12:29,483 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T23:12:29,484 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-02T23:12:29,486 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 241 msec 2024-12-02T23:12:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T23:12:29,563 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T23:12:29,564 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithResetTtl 2024-12-02T23:12:29,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,566 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-02T23:12:29,567 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,571 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-02T23:12:29,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:29,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-02T23:12:29,581 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:29,581 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:29,584 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/recovered.edits] 2024-12-02T23:12:29,585 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/recovered.edits] 2024-12-02T23:12:29,592 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/cf/31cf6eb94c594658816e1639ed0a10fe 2024-12-02T23:12:29,592 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/cf/909aa0b9bfc7465c9ad3e4ce01b84247 2024-12-02T23:12:29,596 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521/recovered.edits/9.seqid 2024-12-02T23:12:29,597 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3/recovered.edits/9.seqid 2024-12-02T23:12:29,597 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:29,598 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithResetTtl/876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:29,598 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-02T23:12:29,598 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-02T23:12:29,599 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-02T23:12:29,603 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b20241202167229515bdd45f4b297f7059c534345_ace763b190e7e1c4038a215dd6005521 2024-12-02T23:12:29,605 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived 
from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e20241202466f4a7722a5408f86f361e6659074de_876889227da0811eedb8889a1116e0f3 2024-12-02T23:12:29,605 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-02T23:12:29,608 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,611 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-02T23:12:29,617 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-02T23:12:29,619 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,619 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-02T23:12:29,619 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181149619"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,619 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181149619"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,622 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:12:29,622 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 876889227da0811eedb8889a1116e0f3, NAME => 'testtb-testExportWithResetTtl,,1733181121203.876889227da0811eedb8889a1116e0f3.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ace763b190e7e1c4038a215dd6005521, NAME => 'testtb-testExportWithResetTtl,1,1733181121203.ace763b190e7e1c4038a215dd6005521.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:12:29,622 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
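With the table itself removed from META, the remaining cleanup recorded next in the log deletes the snapshots taken earlier in the test (emptySnaptb0-, snaptb- and snaptb0-testExportWithResetTtl), which the master's SnapshotManager handles. A minimal sketch of the equivalent client calls, again assuming a standard 2.x Connection rather than the test's own helpers:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropSnapshotsSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Snapshot names taken from the log; each call is a master RPC that
            // removes the snapshot's manifest and reference files.
            admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
            admin.deleteSnapshot("snaptb-testExportWithResetTtl");
            admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
    }
}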
2024-12-02T23:12:29,622 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181149622"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:29,624 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-02T23:12:29,625 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T23:12:29,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 61 msec 2024-12-02T23:12:29,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-02T23:12:29,683 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-02T23:12:29,683 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T23:12:29,694 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T23:12:29,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-02T23:12:29,700 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-02T23:12:29,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-02T23:12:29,703 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T23:12:29,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-02T23:12:29,738 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=792 (was 781) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:52580 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46243 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:46243 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40229 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 29330) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:47470 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:40229 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-196966568_1 at /127.0.0.1:47440 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3044 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:35262 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40229 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=796 (was 789) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=906 (was 796) - SystemLoadAverage LEAK? 
-, ProcessCount=16 (was 19), AvailableMemoryMB=2277 (was 2647) 2024-12-02T23:12:29,739 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-02T23:12:29,760 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=792, OpenFileDescriptor=796, MaxFileDescriptor=1048576, SystemLoadAverage=906, ProcessCount=16, AvailableMemoryMB=2271 2024-12-02T23:12:29,761 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-02T23:12:29,763 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:12:29,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:29,766 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:12:29,766 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-02T23:12:29,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T23:12:29,768 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:12:29,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742037_1213 (size=443) 2024-12-02T23:12:29,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742037_1213 (size=443) 2024-12-02T23:12:29,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742037_1213 (size=443) 2024-12-02T23:12:29,792 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 04775e65fe6ef103e69037e0e1af437b, NAME => 'testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:29,792 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ac52261d15a01e439a3454a7a0254314, NAME => 'testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742038_1214 (size=68) 2024-12-02T23:12:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742038_1214 (size=68) 2024-12-02T23:12:29,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742038_1214 (size=68) 2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 04775e65fe6ef103e69037e0e1af437b, disabling compactions & flushes 2024-12-02T23:12:29,828 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. after waiting 0 ms 2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:29,828 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
2024-12-02T23:12:29,828 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 04775e65fe6ef103e69037e0e1af437b: Waiting for close lock at 1733181149828Disabling compacts and flushes for region at 1733181149828Disabling writes for close at 1733181149828Writing region close event to WAL at 1733181149828Closed at 1733181149828 2024-12-02T23:12:29,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742039_1215 (size=68) 2024-12-02T23:12:29,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742039_1215 (size=68) 2024-12-02T23:12:29,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742039_1215 (size=68) 2024-12-02T23:12:29,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:29,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing ac52261d15a01e439a3454a7a0254314, disabling compactions & flushes 2024-12-02T23:12:29,832 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:29,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:29,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. after waiting 0 ms 2024-12-02T23:12:29,832 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:29,833 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 
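For context, the CREATE request logged above ('testtb-testExportFileSystemState', a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0' and VERSIONS => '1', pre-split into two regions at key '1') could be reproduced with the public HBase client API roughly as below. This is a minimal sketch, not the test's actual code; the class name and the default connection setup are assumptions.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch: create a MOB-enabled table equivalent to the descriptor logged above.
public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)   // IS_MOB => 'true'
              .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB
              .setMaxVersions(1)     // VERSIONS => '1'
              .build())
          .build();
      // A single split key '1' yields the two regions seen in the log (STARTKEY '' and '1').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}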
2024-12-02T23:12:29,833 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for ac52261d15a01e439a3454a7a0254314: Waiting for close lock at 1733181149832Disabling compacts and flushes for region at 1733181149832Disabling writes for close at 1733181149832Writing region close event to WAL at 1733181149832Closed at 1733181149832 2024-12-02T23:12:29,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:12:29,835 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733181149834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181149834"}]},"ts":"1733181149834"} 2024-12-02T23:12:29,835 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733181149834"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181149834"}]},"ts":"1733181149834"} 2024-12-02T23:12:29,837 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:12:29,838 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:12:29,839 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181149838"}]},"ts":"1733181149838"} 2024-12-02T23:12:29,840 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T23:12:29,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:12:29,842 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:12:29,842 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:12:29,842 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:12:29,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:12:29,843 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, ASSIGN}] 2024-12-02T23:12:29,844 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, ASSIGN 2024-12-02T23:12:29,844 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, ASSIGN 2024-12-02T23:12:29,845 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:12:29,845 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:12:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T23:12:29,995 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
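The repeated "Checking to see if procedure is done pid=96" entries are the client polling the master until the CreateTableProcedure completes. In application code this wait is normally hidden inside the synchronous Admin call, but an explicit wait might look like the sketch below; it is a hedged illustration only, and the timeout and sleep interval are arbitrary choices.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTableSketch {
  // Sketch: poll until the newly created table is available (all regions assigned and open).
  public static void waitForTable(Admin admin, TableName table, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException(table + " not available after " + timeoutMs + " ms");
      }
      Thread.sleep(200); // roughly the polling cadence visible in the log entries
    }
  }
}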
2024-12-02T23:12:29,996 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ac52261d15a01e439a3454a7a0254314, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:29,996 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=04775e65fe6ef103e69037e0e1af437b, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:29,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, ASSIGN because future has completed 2024-12-02T23:12:29,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, ASSIGN because future has completed 2024-12-02T23:12:30,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:12:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T23:12:30,154 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:30,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => ac52261d15a01e439a3454a7a0254314, NAME => 'testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:12:30,155 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:30,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 04775e65fe6ef103e69037e0e1af437b, NAME => 'testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:12:30,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. service=AccessControlService 2024-12-02T23:12:30,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
service=AccessControlService 2024-12-02T23:12:30,155 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:30,155 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:30,155 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,157 INFO [StoreOpener-04775e65fe6ef103e69037e0e1af437b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,157 INFO [StoreOpener-ac52261d15a01e439a3454a7a0254314-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,159 INFO [StoreOpener-ac52261d15a01e439a3454a7a0254314-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ac52261d15a01e439a3454a7a0254314 columnFamilyName cf 2024-12-02T23:12:30,159 INFO [StoreOpener-04775e65fe6ef103e69037e0e1af437b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04775e65fe6ef103e69037e0e1af437b columnFamilyName cf 2024-12-02T23:12:30,160 DEBUG [StoreOpener-04775e65fe6ef103e69037e0e1af437b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:30,160 DEBUG [StoreOpener-ac52261d15a01e439a3454a7a0254314-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:30,161 INFO [StoreOpener-04775e65fe6ef103e69037e0e1af437b-1 {}] regionserver.HStore(327): Store=04775e65fe6ef103e69037e0e1af437b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:30,161 INFO [StoreOpener-ac52261d15a01e439a3454a7a0254314-1 {}] regionserver.HStore(327): Store=ac52261d15a01e439a3454a7a0254314/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:30,161 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,161 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,162 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,162 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,162 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,162 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,165 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,165 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,167 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:30,167 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:30,168 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 04775e65fe6ef103e69037e0e1af437b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68815695, jitterRate=0.02543376386165619}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:30,168 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:30,168 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened ac52261d15a01e439a3454a7a0254314; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62068687, jitterRate=-0.0751044899225235}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:30,168 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,169 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for ac52261d15a01e439a3454a7a0254314: Running coprocessor pre-open hook at 1733181150156Writing region info on filesystem at 1733181150156Initializing all the Stores at 1733181150157 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181150157Cleaning up temporary data from old regions at 1733181150163 (+6 ms)Running coprocessor post-open hooks at 1733181150168 (+5 ms)Region opened successfully at 1733181150169 (+1 ms) 2024-12-02T23:12:30,169 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 04775e65fe6ef103e69037e0e1af437b: Running coprocessor pre-open hook at 1733181150156Writing region info on filesystem at 1733181150156Initializing all the Stores at 1733181150157 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181150157Cleaning up temporary data from old regions at 1733181150163 (+6 ms)Running coprocessor post-open hooks at 1733181150168 (+5 ms)Region opened successfully at 1733181150169 (+1 ms) 2024-12-02T23:12:30,170 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b., pid=100, masterSystemTime=1733181150152 2024-12-02T23:12:30,170 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314., pid=99, masterSystemTime=1733181150151 2024-12-02T23:12:30,171 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:30,171 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
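Both regions report "Opened" above, one on the server listening on port 39847 and one on 38049. Once that happens, a client can observe the resulting placement through RegionLocator; the following is a minimal sketch with the connection setup assumed, not part of the test itself.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Sketch: list where each region of the new table ended up after assignment.
public class RegionPlacementSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection();
         RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Expect two entries, matching the two OpenRegionProcedures in the log.
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}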
2024-12-02T23:12:30,172 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=04775e65fe6ef103e69037e0e1af437b, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:30,172 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:30,172 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:30,173 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=ac52261d15a01e439a3454a7a0254314, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:30,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:12:30,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:30,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-02T23:12:30,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-02T23:12:30,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779 in 178 msec 2024-12-02T23:12:30,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622 in 176 msec 2024-12-02T23:12:30,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, ASSIGN in 335 msec 2024-12-02T23:12:30,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-12-02T23:12:30,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, ASSIGN in 336 msec 2024-12-02T23:12:30,182 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:12:30,182 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181150182"}]},"ts":"1733181150182"} 2024-12-02T23:12:30,184 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T23:12:30,185 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:12:30,185 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-02T23:12:30,188 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:12:30,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:30,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:30,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:30,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:30,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:30,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:30,193 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:30,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 429 msec 2024-12-02T23:12:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T23:12:30,393 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T23:12:30,393 DEBUG [Time-limited 
test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:30,398 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-02T23:12:30,399 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:30,399 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:30,400 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:30,409 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:30,419 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:30,424 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:12:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181150424 (current time:1733181150424). 
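The snapshot request logged above ({ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }) corresponds to an Admin.snapshot() call on the client side. A hedged sketch of an equivalent call follows; it is not the test's actual code, and the connection setup is an assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

// Sketch: request a FLUSH-type snapshot of the (still empty) table.
public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);   // matches type=FLUSH in the logged request
    }
  }
}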
2024-12-02T23:12:30,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T23:12:30,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b317046, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:30,433 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78bc5016, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:30,434 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,435 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49638, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:30,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22952d9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:30,438 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:30,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:30,439 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:30,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:12:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:12:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eba9283, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:30,443 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:30,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:30,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:30,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@159f464b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:30,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:30,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,445 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:30,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a762723, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:30,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:30,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:30,449 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
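The "jenkins: RWXCA" entries being read back in this part of the log are the table ACL that the AccessController stored for the table creator (see the PermissionStorage and ZKPermissionWatcher entries above). With security enabled, those permissions can also be inspected through the public AccessControlClient API; the sketch below is only an illustration, with the table-name regex and connection setup as assumptions.

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

// Sketch: read back the ACL entries stored in hbase:acl for the test table.
public class ReadAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection()) {
      for (UserPermission perm :
          AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemState")) {
        System.out.println(perm);   // expected to show something like "jenkins: RWXCA"
      }
    }
  }
}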
2024-12-02T23:12:30,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:30,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:30,452 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:30,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:12:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:30,453 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:30,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:12:30,454 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:30,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:12:30,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:12:30,462 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:30,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T23:12:30,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T23:12:30,464 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:30,468 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742040_1216 (size=170) 2024-12-02T23:12:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742040_1216 (size=170) 2024-12-02T23:12:30,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742040_1216 (size=170) 2024-12-02T23:12:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T23:12:30,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T23:12:30,889 INFO [PEWorker-3 
{}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:30,889 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314}] 2024-12-02T23:12:30,891 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:30,891 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:31,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-02T23:12:31,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-02T23:12:31,043 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 04775e65fe6ef103e69037e0e1af437b: 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for ac52261d15a01e439a3454a7a0254314: 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:31,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742041_1217 (size=71) 2024-12-02T23:12:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742042_1218 (size=71) 2024-12-02T23:12:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742041_1217 (size=71) 2024-12-02T23:12:31,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742041_1217 (size=71) 2024-12-02T23:12:31,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742042_1218 (size=71) 2024-12-02T23:12:31,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742042_1218 (size=71) 2024-12-02T23:12:31,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:31,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-02T23:12:31,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
2024-12-02T23:12:31,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-02T23:12:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-02T23:12:31,066 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:31,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-02T23:12:31,066 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:31,066 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:31,066 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:31,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b in 178 msec 2024-12-02T23:12:31,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-12-02T23:12:31,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 in 179 msec 2024-12-02T23:12:31,070 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:31,071 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:31,072 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:12:31,072 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:31,072 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:31,073 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:12:31,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742043_1219 (size=63) 2024-12-02T23:12:31,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742043_1219 (size=63) 2024-12-02T23:12:31,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742043_1219 (size=63) 2024-12-02T23:12:31,085 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:31,085 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:31,085 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:31,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T23:12:31,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742044_1220 (size=653) 2024-12-02T23:12:31,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742044_1220 (size=653) 2024-12-02T23:12:31,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742044_1220 (size=653) 2024-12-02T23:12:31,111 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:31,117 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:31,118 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:31,120 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:31,120 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T23:12:31,123 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 666 msec 2024-12-02T23:12:31,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T23:12:31,603 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T23:12:31,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:31,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:31,616 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:31,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-02T23:12:31,620 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
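
The entries above trace the master-side SnapshotProcedure (pid=101) for the FLUSH snapshot emptySnaptb0-testExportFileSystemState from SNAPSHOT_PREPARE through SNAPSHOT_POST_OPERATION to completion. On the client side such a snapshot is normally requested through the HBase Admin API; the synchronous call waits until the master reports the procedure as done, which is consistent with the repeated "Checking to see if procedure is done pid=101" entries. A minimal sketch, reusing the table and snapshot names from the log (illustrative only, not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush-type snapshot of the table; the call returns once the
          // master-side snapshot procedure has finished.
          admin.snapshot("emptySnaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }
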
2024-12-02T23:12:31,620 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:31,622 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:31,628 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:31,641 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:12:31,648 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:12:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181151649 (current time:1733181151649). 2024-12-02T23:12:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T23:12:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:31,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42211fe5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:31,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:31,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:31,653 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:31,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:31,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:31,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75463a6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T23:12:31,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:31,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:31,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,656 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:31,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@509beeac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:31,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:31,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:31,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:31,660 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39164, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:31,662 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
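
Before validating the snapshot request, the handler above opens a short-lived client connection: it fetches the cluster id from the connection registry and then the hbase:meta location ("The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,...]") so it can scan the ACL table. Application code can inspect region-to-server locations in a similar way through the public RegionLocator API; a small sketch, again assuming the test table name from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class RegionLocationsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportFileSystemState"))) {
          // Each location pairs a region (e.g. 04775e65... and ac52261d... above)
          // with the region server that currently hosts it.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }
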
2024-12-02T23:12:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T23:12:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,662 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49899460, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:31,668 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73085780, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:31,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,670 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51124, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:31,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b3ec344, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:31,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:31,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:31,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:31,677 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:31,679 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:31,680 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:31,681 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:31,685 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:31,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T23:12:31,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:31,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:12:31,686 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:31,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
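
The call stack above shows the master reading the table's permissions (PermissionStorage.getPermissions) so they can be written into the snapshot description, and the "Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]" entry is the result: user jenkins holds READ, WRITE, EXEC, CREATE and ADMIN on the table. An ACL entry like that is typically created with the AccessControlClient utility when the AccessController coprocessor is enabled; a hedged sketch of such a grant (names reused from the log, not the test's actual setup code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantExample {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Grant R/W/X/C/A on the whole table (family and qualifier left null),
          // which corresponds to the "jenkins: RWXCA" ACL entry in the log.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportFileSystemState"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }
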
2024-12-02T23:12:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:12:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T23:12:31,698 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:31,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T23:12:31,699 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:31,702 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:31,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742045_1221 (size=165) 2024-12-02T23:12:31,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742045_1221 (size=165) 2024-12-02T23:12:31,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742045_1221 (size=165) 2024-12-02T23:12:31,732 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:31,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314}] 2024-12-02T23:12:31,735 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:31,735 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:31,803 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T23:12:31,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-02T23:12:31,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-02T23:12:31,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:31,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 04775e65fe6ef103e69037e0e1af437b 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T23:12:31,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:31,895 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing ac52261d15a01e439a3454a7a0254314 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T23:12:31,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b is 71, key is 0716e69f87f5953f5c867753847c18af/cf:q/1733181151612/Put/seqid=0 2024-12-02T23:12:31,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 is 71, key is 15ddbfaf0d5227768c544aff816ffd1c/cf:q/1733181151615/Put/seqid=0 2024-12-02T23:12:31,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742046_1222 (size=5171) 2024-12-02T23:12:31,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742046_1222 (size=5171) 2024-12-02T23:12:31,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742046_1222 (size=5171) 2024-12-02T23:12:31,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:31,997 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:31,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/.tmp/cf/15a886d5714c4a08b02c764f7802317a, store: [table=testtb-testExportFileSystemState family=cf region=04775e65fe6ef103e69037e0e1af437b] 2024-12-02T23:12:31,999 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/.tmp/cf/15a886d5714c4a08b02c764f7802317a is 209, key is 0d3c3a822303247b9a3462aaeaa04a2ad/cf:q/1733181151612/Put/seqid=0 2024-12-02T23:12:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T23:12:32,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742047_1223 (size=8102) 2024-12-02T23:12:32,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742047_1223 (size=8102) 2024-12-02T23:12:32,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742047_1223 (size=8102) 2024-12-02T23:12:32,035 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:32,063 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:32,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/.tmp/cf/01715d138b2a44e8a189f223ba31c04d, store: 
[table=testtb-testExportFileSystemState family=cf region=ac52261d15a01e439a3454a7a0254314] 2024-12-02T23:12:32,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/.tmp/cf/01715d138b2a44e8a189f223ba31c04d is 209, key is 1118321c3979ee2c4008c8fd526a3e675/cf:q/1733181151615/Put/seqid=0 2024-12-02T23:12:32,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742048_1224 (size=6121) 2024-12-02T23:12:32,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742048_1224 (size=6121) 2024-12-02T23:12:32,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742048_1224 (size=6121) 2024-12-02T23:12:32,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/.tmp/cf/15a886d5714c4a08b02c764f7802317a 2024-12-02T23:12:32,097 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/.tmp/cf/15a886d5714c4a08b02c764f7802317a as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a 2024-12-02T23:12:32,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a, entries=4, sequenceid=6, filesize=6.0 K 2024-12-02T23:12:32,123 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 04775e65fe6ef103e69037e0e1af437b in 229ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:32,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-02T23:12:32,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 04775e65fe6ef103e69037e0e1af437b: 2024-12-02T23:12:32,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. for snaptb0-testExportFileSystemState completed. 2024-12-02T23:12:32,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T23:12:32,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:32,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a] hfiles 2024-12-02T23:12:32,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a for snapshot=snaptb0-testExportFileSystemState 2024-12-02T23:12:32,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742049_1225 (size=14794) 2024-12-02T23:12:32,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742049_1225 (size=14794) 2024-12-02T23:12:32,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742049_1225 (size=14794) 2024-12-02T23:12:32,140 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/.tmp/cf/01715d138b2a44e8a189f223ba31c04d 2024-12-02T23:12:32,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/.tmp/cf/01715d138b2a44e8a189f223ba31c04d as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d 2024-12-02T23:12:32,164 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d, entries=46, sequenceid=6, filesize=14.4 K 2024-12-02T23:12:32,166 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for ac52261d15a01e439a3454a7a0254314 in 271ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for ac52261d15a01e439a3454a7a0254314: 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. for snaptb0-testExportFileSystemState completed. 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d] hfiles 2024-12-02T23:12:32,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d for snapshot=snaptb0-testExportFileSystemState 2024-12-02T23:12:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742050_1226 (size=110) 2024-12-02T23:12:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742050_1226 (size=110) 2024-12-02T23:12:32,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742050_1226 (size=110) 2024-12-02T23:12:32,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
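
Because snaptb0-testExportFileSystemState is a FLUSH-type snapshot, each region first flushes its memstore to store files (the "Flushing ... 1/1 column families", "Added ... entries=46 ... filesize=14.4 K" and "Finished flush of dataSize ~3.00 KB ... in 271ms" entries above) so the snapshot manifest can reference on-disk hfiles. The same flush can also be requested on demand through the Admin API; a minimal sketch, assuming the same table name as in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask every region of the table to write its in-memory edits out as hfiles,
          // the same per-region flush the FLUSH snapshot triggers in the log above.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
        }
      }
    }
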
2024-12-02T23:12:32,250 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-02T23:12:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-02T23:12:32,251 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:32,252 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:32,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04775e65fe6ef103e69037e0e1af437b in 521 msec 2024-12-02T23:12:32,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742051_1227 (size=110) 2024-12-02T23:12:32,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742051_1227 (size=110) 2024-12-02T23:12:32,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742051_1227 (size=110) 2024-12-02T23:12:32,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 
2024-12-02T23:12:32,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-02T23:12:32,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-02T23:12:32,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:32,286 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:32,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-02T23:12:32,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ac52261d15a01e439a3454a7a0254314 in 558 msec 2024-12-02T23:12:32,294 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:32,298 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:32,317 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
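
The procedure has just entered its SNAPSHOT_SNAPSHOT_MOB_REGION step, and the mobdir/... paths plus the DefaultMobStoreFlusher entries earlier indicate that the table's cf family is MOB-enabled, so oversized cells are flushed into separate MOB files that the snapshot also references. A family is usually configured that way when the table is created; a sketch under the assumption of a 1 KB MOB threshold (the threshold value is illustrative and not taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family 'cf' stores values larger than the threshold as MOB files,
          // which end up under the mobdir hierarchy seen in the log.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)
                  .setMobThreshold(1024L) // illustrative threshold, not from the test
                  .build())
              .build());
        }
      }
    }
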
2024-12-02T23:12:32,317 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:32,317 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:32,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T23:12:32,325 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b] hfiles 2024-12-02T23:12:32,325 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:32,325 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:32,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742052_1228 (size=294) 2024-12-02T23:12:32,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742052_1228 (size=294) 2024-12-02T23:12:32,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742052_1228 (size=294) 2024-12-02T23:12:32,377 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:32,378 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-02T23:12:32,378 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T23:12:32,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742053_1229 (size=963) 2024-12-02T23:12:32,494 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742053_1229 (size=963) 2024-12-02T23:12:32,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742053_1229 (size=963) 2024-12-02T23:12:32,517 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:32,536 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:32,537 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T23:12:32,538 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:32,539 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T23:12:32,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 850 msec 2024-12-02T23:12:32,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T23:12:32,833 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T23:12:32,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833 2024-12-02T23:12:32,833 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:32,881 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, 
inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:32,881 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T23:12:32,884 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:12:32,895 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T23:12:32,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742054_1230 (size=165) 2024-12-02T23:12:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742054_1230 (size=165) 2024-12-02T23:12:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742054_1230 (size=165) 2024-12-02T23:12:32,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742055_1231 (size=963) 2024-12-02T23:12:32,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742055_1231 (size=963) 2024-12-02T23:12:32,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742055_1231 (size=963) 2024-12-02T23:12:32,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:32,992 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:32,993 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:33,269 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0003_000001 (auth:SIMPLE) from 127.0.0.1:48730 2024-12-02T23:12:33,290 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000001/launch_container.sh] 2024-12-02T23:12:33,291 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000001/container_tokens] 2024-12-02T23:12:33,291 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0003/container_1733180955226_0003_01_000001/sysfs] 2024-12-02T23:12:33,584 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:12:34,325 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-10298144102346738619.jar 2024-12-02T23:12:34,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,326 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-15432563099319302676.jar 2024-12-02T23:12:34,421 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:34,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:12:34,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:12:34,424 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:12:34,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:12:34,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:12:34,425 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:12:34,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:12:34,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:12:34,426 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:12:34,427 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:12:34,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:12:34,427 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:34,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:34,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:34,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:34,429 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:34,429 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:34,430 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:34,520 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:12:34,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742056_1232 (size=131440) 2024-12-02T23:12:34,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742056_1232 (size=131440) 2024-12-02T23:12:34,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742056_1232 (size=131440) 2024-12-02T23:12:34,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39833 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T23:12:34,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T23:12:34,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742057_1233 (size=4188619) 2024-12-02T23:12:34,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T23:12:34,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T23:12:34,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742058_1234 (size=1323991) 2024-12-02T23:12:34,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742059_1235 (size=903926) 2024-12-02T23:12:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742059_1235 (size=903926) 2024-12-02T23:12:34,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742059_1235 (size=903926) 2024-12-02T23:12:34,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T23:12:34,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T23:12:34,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742060_1236 (size=8360083) 2024-12-02T23:12:34,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T23:12:34,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T23:12:34,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742061_1237 (size=1877034) 2024-12-02T23:12:34,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742062_1238 (size=77835) 2024-12-02T23:12:34,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742062_1238 (size=77835) 2024-12-02T23:12:34,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742062_1238 (size=77835) 2024-12-02T23:12:34,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742063_1239 (size=30949) 2024-12-02T23:12:34,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742063_1239 (size=30949) 2024-12-02T23:12:34,940 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742063_1239 (size=30949) 2024-12-02T23:12:34,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T23:12:34,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T23:12:34,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742064_1240 (size=1597213) 2024-12-02T23:12:35,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742065_1241 (size=443172) 2024-12-02T23:12:35,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742065_1241 (size=443172) 2024-12-02T23:12:35,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742065_1241 (size=443172) 2024-12-02T23:12:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742066_1242 (size=4695811) 2024-12-02T23:12:35,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742066_1242 (size=4695811) 2024-12-02T23:12:35,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742066_1242 (size=4695811) 2024-12-02T23:12:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742067_1243 (size=232957) 2024-12-02T23:12:35,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742067_1243 (size=232957) 2024-12-02T23:12:35,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742067_1243 (size=232957) 2024-12-02T23:12:35,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742068_1244 (size=127628) 2024-12-02T23:12:35,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742068_1244 (size=127628) 2024-12-02T23:12:35,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742068_1244 (size=127628) 2024-12-02T23:12:35,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742069_1245 (size=20406) 2024-12-02T23:12:35,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742069_1245 (size=20406) 2024-12-02T23:12:35,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742069_1245 (size=20406) 2024-12-02T23:12:35,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742070_1246 (size=5175431) 2024-12-02T23:12:35,153 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742070_1246 (size=5175431) 2024-12-02T23:12:35,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742070_1246 (size=5175431) 2024-12-02T23:12:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742071_1247 (size=217634) 2024-12-02T23:12:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742071_1247 (size=217634) 2024-12-02T23:12:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742071_1247 (size=217634) 2024-12-02T23:12:35,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T23:12:35,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T23:12:35,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742072_1248 (size=1832290) 2024-12-02T23:12:35,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T23:12:35,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T23:12:35,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T23:12:35,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T23:12:35,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742073_1249 (size=322274) 2024-12-02T23:12:35,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742073_1249 (size=322274) 2024-12-02T23:12:35,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742073_1249 (size=322274) 2024-12-02T23:12:35,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742074_1250 (size=503880) 2024-12-02T23:12:35,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742074_1250 (size=503880) 2024-12-02T23:12:35,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742074_1250 (size=503880) 2024-12-02T23:12:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742075_1251 (size=6424743) 
2024-12-02T23:12:35,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742075_1251 (size=6424743) 2024-12-02T23:12:35,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742075_1251 (size=6424743) 2024-12-02T23:12:35,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742076_1252 (size=29229) 2024-12-02T23:12:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742076_1252 (size=29229) 2024-12-02T23:12:35,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742076_1252 (size=29229) 2024-12-02T23:12:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742077_1253 (size=24096) 2024-12-02T23:12:35,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742077_1253 (size=24096) 2024-12-02T23:12:35,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742077_1253 (size=24096) 2024-12-02T23:12:35,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742078_1254 (size=111872) 2024-12-02T23:12:35,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742078_1254 (size=111872) 2024-12-02T23:12:35,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742078_1254 (size=111872) 2024-12-02T23:12:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742079_1255 (size=45609) 2024-12-02T23:12:35,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742079_1255 (size=45609) 2024-12-02T23:12:35,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742079_1255 (size=45609) 2024-12-02T23:12:35,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742080_1256 (size=136454) 2024-12-02T23:12:35,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742080_1256 (size=136454) 2024-12-02T23:12:35,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742080_1256 (size=136454) 2024-12-02T23:12:35,405 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T23:12:35,408 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-02T23:12:35,412 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-12-02T23:12:35,412 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-02T23:12:35,412 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-02T23:12:35,412 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:12:35,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742081_1257 (size=1035) 2024-12-02T23:12:35,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742081_1257 (size=1035) 2024-12-02T23:12:35,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742081_1257 (size=1035) 2024-12-02T23:12:35,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742082_1258 (size=35) 2024-12-02T23:12:35,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742082_1258 (size=35) 2024-12-02T23:12:35,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742082_1258 (size=35) 2024-12-02T23:12:35,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742083_1259 (size=304006) 2024-12-02T23:12:35,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742083_1259 (size=304006) 2024-12-02T23:12:35,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742083_1259 (size=304006) 2024-12-02T23:12:35,527 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:12:35,527 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:12:36,073 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:52394 2024-12-02T23:12:40,706 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:12:43,283 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:36040 2024-12-02T23:12:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742084_1260 (size=349704) 2024-12-02T23:12:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742084_1260 (size=349704) 2024-12-02T23:12:43,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742084_1260 (size=349704) 2024-12-02T23:12:45,663 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:48578 2024-12-02T23:12:45,664 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:58456 2024-12-02T23:12:46,415 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:48592 2024-12-02T23:12:46,415 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:58462 2024-12-02T23:12:49,271 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0004_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:12:52,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742085_1261 (size=14794) 2024-12-02T23:12:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742085_1261 (size=14794) 2024-12-02T23:12:52,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742085_1261 (size=14794) 2024-12-02T23:12:53,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742087_1263 (size=6121) 2024-12-02T23:12:53,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742087_1263 (size=6121) 2024-12-02T23:12:53,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742087_1263 (size=6121) 2024-12-02T23:12:53,426 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000004/launch_container.sh] 2024-12-02T23:12:53,427 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000004/container_tokens] 2024-12-02T23:12:53,427 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000004/sysfs] 2024-12-02T23:12:53,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742088_1264 (size=5171) 2024-12-02T23:12:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742088_1264 (size=5171) 2024-12-02T23:12:53,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742088_1264 (size=5171) 2024-12-02T23:12:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742089_1265 (size=8102) 2024-12-02T23:12:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742089_1265 (size=8102) 2024-12-02T23:12:53,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742089_1265 (size=8102) 2024-12-02T23:12:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742086_1262 (size=31748) 2024-12-02T23:12:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742086_1262 (size=31748) 2024-12-02T23:12:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742086_1262 (size=31748) 2024-12-02T23:12:53,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742090_1266 (size=466) 2024-12-02T23:12:53,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742090_1266 (size=466) 2024-12-02T23:12:53,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742090_1266 (size=466) 2024-12-02T23:12:53,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742091_1267 (size=31748) 2024-12-02T23:12:53,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 
is added to blk_1073742091_1267 (size=31748) 2024-12-02T23:12:53,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742091_1267 (size=31748) 2024-12-02T23:12:53,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000003/launch_container.sh] 2024-12-02T23:12:53,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000003/container_tokens] 2024-12-02T23:12:53,807 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000003/sysfs] 2024-12-02T23:12:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742092_1268 (size=349704) 2024-12-02T23:12:53,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742092_1268 (size=349704) 2024-12-02T23:12:53,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742092_1268 (size=349704) 2024-12-02T23:12:53,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:53540 2024-12-02T23:12:53,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:53546 2024-12-02T23:12:54,911 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:12:54,913 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T23:12:54,921 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-02T23:12:54,921 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:12:54,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:12:54,922 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T23:12:54,922 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T23:12:54,922 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T23:12:54,922 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T23:12:54,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T23:12:54,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181152833/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T23:12:54,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemState 2024-12-02T23:12:54,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:54,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T23:12:54,933 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181174933"}]},"ts":"1733181174933"} 2024-12-02T23:12:54,935 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-02T23:12:54,935 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-02T23:12:54,935 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-02T23:12:54,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, UNASSIGN}] 2024-12-02T23:12:54,938 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, UNASSIGN 2024-12-02T23:12:54,938 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, UNASSIGN 2024-12-02T23:12:54,939 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ac52261d15a01e439a3454a7a0254314, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:54,939 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=04775e65fe6ef103e69037e0e1af437b, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:12:54,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, UNASSIGN because future has completed 2024-12-02T23:12:54,940 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:54,941 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:54,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, UNASSIGN because future has completed 2024-12-02T23:12:54,942 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:12:54,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:12:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T23:12:55,093 INFO 
[RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:55,093 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:55,093 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing ac52261d15a01e439a3454a7a0254314, disabling compactions & flushes 2024-12-02T23:12:55,093 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:55,093 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:55,093 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. after waiting 0 ms 2024-12-02T23:12:55,093 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:55,095 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:55,095 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:12:55,095 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 04775e65fe6ef103e69037e0e1af437b, disabling compactions & flushes 2024-12-02T23:12:55,095 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:55,095 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 2024-12-02T23:12:55,095 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. after waiting 0 ms 2024-12-02T23:12:55,095 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
2024-12-02T23:12:55,099 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:55,100 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:55,100 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314. 2024-12-02T23:12:55,100 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for ac52261d15a01e439a3454a7a0254314: Waiting for close lock at 1733181175093Running coprocessor pre-close hooks at 1733181175093Disabling compacts and flushes for region at 1733181175093Disabling writes for close at 1733181175093Writing region close event to WAL at 1733181175094 (+1 ms)Running coprocessor post-close hooks at 1733181175100 (+6 ms)Closed at 1733181175100 2024-12-02T23:12:55,100 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:12:55,101 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:12:55,101 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b. 
2024-12-02T23:12:55,101 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 04775e65fe6ef103e69037e0e1af437b: Waiting for close lock at 1733181175095Running coprocessor pre-close hooks at 1733181175095Disabling compacts and flushes for region at 1733181175095Disabling writes for close at 1733181175095Writing region close event to WAL at 1733181175096 (+1 ms)Running coprocessor post-close hooks at 1733181175101 (+5 ms)Closed at 1733181175101 2024-12-02T23:12:55,102 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:55,103 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=ac52261d15a01e439a3454a7a0254314, regionState=CLOSED 2024-12-02T23:12:55,103 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:55,104 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=04775e65fe6ef103e69037e0e1af437b, regionState=CLOSED 2024-12-02T23:12:55,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:55,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:12:55,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-02T23:12:55,110 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure ac52261d15a01e439a3454a7a0254314, server=9e5ab3b623d7,38049,1733180945779 in 166 msec 2024-12-02T23:12:55,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-02T23:12:55,112 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 04775e65fe6ef103e69037e0e1af437b, server=9e5ab3b623d7,39847,1733180945622 in 167 msec 2024-12-02T23:12:55,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=ac52261d15a01e439a3454a7a0254314, UNASSIGN in 173 msec 2024-12-02T23:12:55,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-12-02T23:12:55,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=04775e65fe6ef103e69037e0e1af437b, UNASSIGN in 175 msec 2024-12-02T23:12:55,117 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-02T23:12:55,117 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 180 msec 2024-12-02T23:12:55,118 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181175118"}]},"ts":"1733181175118"} 2024-12-02T23:12:55,120 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T23:12:55,120 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-02T23:12:55,123 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 191 msec 2024-12-02T23:12:55,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T23:12:55,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T23:12:55,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T23:12:55,253 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemState 2024-12-02T23:12:55,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,256 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-02T23:12:55,258 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,259 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-02T23:12:55,262 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:55,262 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:55,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T23:12:55,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T23:12:55,265 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T23:12:55,265 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/recovered.edits] 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-12-02T23:12:55,266 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,267 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data null 2024-12-02T23:12:55,267 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:12:55,267 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/recovered.edits] 2024-12-02T23:12:55,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-02T23:12:55,272 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/cf/15a886d5714c4a08b02c764f7802317a 2024-12-02T23:12:55,273 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/cf/01715d138b2a44e8a189f223ba31c04d 2024-12-02T23:12:55,277 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314/recovered.edits/9.seqid 2024-12-02T23:12:55,277 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b/recovered.edits/9.seqid 2024-12-02T23:12:55,278 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:55,278 DEBUG [HFileArchiver-12 {}] 
backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemState/04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:55,278 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-02T23:12:55,278 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-02T23:12:55,279 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-02T23:12:55,284 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202412024f70b831ddb04ce5902c57efbb2e3bb1_ac52261d15a01e439a3454a7a0254314 2024-12-02T23:12:55,286 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e2024120256f31ab8e01545a7a2767a8ce42aae29_04775e65fe6ef103e69037e0e1af437b 2024-12-02T23:12:55,286 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-02T23:12:55,289 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,292 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-02T23:12:55,294 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-02T23:12:55,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,295 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
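The procedure activity above (DisableTableProcedure pid=107, DeleteTableProcedure pid=113, and the SnapshotManager deletions that follow) is what the master runs when a client issues a short sequence of Admin calls. The sketch below is illustrative only, not the test's own code: the class name is invented, connection setup is assumed to come from an hbase-site.xml on the classpath, and the table and snapshot names are copied from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
          // Disable first; this is the client-side trigger for a DisableTableProcedure.
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          // Delete the table; the master's DeleteTableProcedure then archives the
          // region and mob directories and removes the rows from hbase:meta.
          admin.deleteTable(tn);
          // These calls correspond to the "Deleting snapshot" SnapshotManager entries.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }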
2024-12-02T23:12:55,296 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181175295"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:55,296 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181175295"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:55,297 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:12:55,298 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 04775e65fe6ef103e69037e0e1af437b, NAME => 'testtb-testExportFileSystemState,,1733181149762.04775e65fe6ef103e69037e0e1af437b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ac52261d15a01e439a3454a7a0254314, NAME => 'testtb-testExportFileSystemState,1,1733181149762.ac52261d15a01e439a3454a7a0254314.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:12:55,298 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-02T23:12:55,298 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181175298"}]},"ts":"9223372036854775807"} 2024-12-02T23:12:55,299 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-02T23:12:55,300 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T23:12:55,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 47 msec 2024-12-02T23:12:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-02T23:12:55,373 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-02T23:12:55,373 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T23:12:55,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T23:12:55,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-02T23:12:55,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T23:12:55,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-02T23:12:55,417 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=800 (was 792) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3896 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:54530 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:43739 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43739 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:32795 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 556) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:32795 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33013 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:52316 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1081275364_1 at /127.0.0.1:53108 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:53122 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=798 (was 796) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=929 (was 906) - SystemLoadAverage LEAK? 
-, ProcessCount=16 (was 16), AvailableMemoryMB=1724 (was 2271) 2024-12-02T23:12:55,417 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-02T23:12:55,437 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=800, OpenFileDescriptor=798, MaxFileDescriptor=1048576, SystemLoadAverage=929, ProcessCount=17, AvailableMemoryMB=1721 2024-12-02T23:12:55,437 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-02T23:12:55,438 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:12:55,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:12:55,440 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:12:55,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-02T23:12:55,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T23:12:55,442 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:12:55,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742093_1269 (size=440) 2024-12-02T23:12:55,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742093_1269 (size=440) 2024-12-02T23:12:55,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742093_1269 (size=440) 2024-12-02T23:12:55,457 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3931075a1273d0e31cda2cc53776b03d, NAME => 'testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:55,458 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 163ac31eb017c11435b0b7ff88739a2b, NAME => 'testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:55,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742094_1270 (size=65) 2024-12-02T23:12:55,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742094_1270 (size=65) 2024-12-02T23:12:55,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742094_1270 (size=65) 2024-12-02T23:12:55,474 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:55,474 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 3931075a1273d0e31cda2cc53776b03d, disabling compactions & flushes 2024-12-02T23:12:55,474 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:55,474 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:55,474 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. after waiting 0 ms 2024-12-02T23:12:55,474 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:55,474 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:55,475 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3931075a1273d0e31cda2cc53776b03d: Waiting for close lock at 1733181175474Disabling compacts and flushes for region at 1733181175474Disabling writes for close at 1733181175474Writing region close event to WAL at 1733181175474Closed at 1733181175474 2024-12-02T23:12:55,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742095_1271 (size=65) 2024-12-02T23:12:55,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742095_1271 (size=65) 2024-12-02T23:12:55,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742095_1271 (size=65) 2024-12-02T23:12:55,477 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:55,477 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 163ac31eb017c11435b0b7ff88739a2b, disabling compactions & flushes 2024-12-02T23:12:55,477 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,477 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,477 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. after waiting 0 ms 2024-12-02T23:12:55,478 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,478 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 
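The table descriptor echoed in the create log above (family 'cf' with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and a single split point '1') can be reproduced with the client-side descriptor builders. A minimal sketch under those assumptions; the class name is made up and error handling is omitted.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Column family 'cf' with MOB enabled and threshold 0, matching the
          // descriptor printed in the create log.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build();
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(cf)
              .build();
          // One split key '1' yields the two regions seen above: ('', '1') and ('1', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }

Pre-splitting at creation time is what produces the two RegionOpenAndInit pool workers in the log: each split range gets its own region directory written during CREATE_TABLE_WRITE_FS_LAYOUT before any region is assigned.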
2024-12-02T23:12:55,478 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 163ac31eb017c11435b0b7ff88739a2b: Waiting for close lock at 1733181175477Disabling compacts and flushes for region at 1733181175477Disabling writes for close at 1733181175477Writing region close event to WAL at 1733181175478 (+1 ms)Closed at 1733181175478 2024-12-02T23:12:55,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:12:55,479 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181175479"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181175479"}]},"ts":"1733181175479"} 2024-12-02T23:12:55,479 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181175479"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181175479"}]},"ts":"1733181175479"} 2024-12-02T23:12:55,482 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:12:55,483 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:12:55,483 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181175483"}]},"ts":"1733181175483"} 2024-12-02T23:12:55,485 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-02T23:12:55,485 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:12:55,486 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:12:55,486 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:12:55,486 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:12:55,486 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:12:55,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, ASSIGN}] 2024-12-02T23:12:55,488 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, ASSIGN 2024-12-02T23:12:55,488 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, ASSIGN 2024-12-02T23:12:55,489 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:12:55,489 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:12:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T23:12:55,639 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
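Once the ASSIGN subprocedures (pid=115/116) complete, the OPENING/OPEN locations they write to hbase:meta are what a client observes through a RegionLocator. A small illustrative check, assuming the table from this log; the class name is invented.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ShowRegionLocations {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Each entry pairs a region (encoded name, start/end key) with its current
          // server, i.e. the regionLocation values recorded in hbase:meta.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }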
2024-12-02T23:12:55,639 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=3931075a1273d0e31cda2cc53776b03d, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:55,639 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=163ac31eb017c11435b0b7ff88739a2b, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:12:55,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, ASSIGN because future has completed 2024-12-02T23:12:55,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:12:55,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, ASSIGN because future has completed 2024-12-02T23:12:55,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:12:55,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T23:12:55,809 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,809 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 163ac31eb017c11435b0b7ff88739a2b, NAME => 'testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:12:55,809 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 3931075a1273d0e31cda2cc53776b03d, NAME => 'testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. service=AccessControlService 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
service=AccessControlService 2024-12-02T23:12:55,810 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:55,810 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:55,810 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,811 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,811 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,811 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:12:55,811 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,811 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,813 INFO [StoreOpener-3931075a1273d0e31cda2cc53776b03d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,818 INFO [StoreOpener-163ac31eb017c11435b0b7ff88739a2b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,820 INFO [StoreOpener-3931075a1273d0e31cda2cc53776b03d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3931075a1273d0e31cda2cc53776b03d columnFamilyName cf 2024-12-02T23:12:55,823 INFO [StoreOpener-163ac31eb017c11435b0b7ff88739a2b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 163ac31eb017c11435b0b7ff88739a2b columnFamilyName cf 2024-12-02T23:12:55,830 DEBUG [StoreOpener-3931075a1273d0e31cda2cc53776b03d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:55,833 INFO [StoreOpener-3931075a1273d0e31cda2cc53776b03d-1 {}] regionserver.HStore(327): Store=3931075a1273d0e31cda2cc53776b03d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:55,834 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,834 DEBUG [StoreOpener-163ac31eb017c11435b0b7ff88739a2b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:55,838 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,838 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,839 INFO [StoreOpener-163ac31eb017c11435b0b7ff88739a2b-1 {}] regionserver.HStore(327): Store=163ac31eb017c11435b0b7ff88739a2b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:12:55,840 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,840 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,843 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,847 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,847 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,847 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,847 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,847 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,849 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:55,849 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,849 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 3931075a1273d0e31cda2cc53776b03d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70755407, jitterRate=0.054337725043296814}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:55,849 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:55,850 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 3931075a1273d0e31cda2cc53776b03d: Running coprocessor pre-open hook at 1733181175811Writing region info on filesystem at 1733181175811Initializing all the Stores at 1733181175812 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181175812Cleaning up temporary data from old regions at 1733181175840 (+28 ms)Running coprocessor post-open hooks at 1733181175849 (+9 ms)Region opened successfully at 1733181175850 (+1 ms) 2024-12-02T23:12:55,851 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d., pid=118, masterSystemTime=1733181175795 2024-12-02T23:12:55,851 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:12:55,852 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 163ac31eb017c11435b0b7ff88739a2b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68001558, jitterRate=0.01330217719078064}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:12:55,852 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:55,852 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 163ac31eb017c11435b0b7ff88739a2b: Running coprocessor pre-open hook at 1733181175811Writing region info on filesystem at 1733181175811Initializing all the Stores at 1733181175815 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181175815Cleaning up temporary data from old regions at 1733181175847 (+32 ms)Running coprocessor post-open hooks at 1733181175852 (+5 ms)Region opened successfully at 1733181175852 2024-12-02T23:12:55,852 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b., pid=117, masterSystemTime=1733181175794 2024-12-02T23:12:55,853 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:55,853 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:55,854 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=3931075a1273d0e31cda2cc53776b03d, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:12:55,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,855 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:55,855 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=163ac31eb017c11435b0b7ff88739a2b, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:12:55,856 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:12:55,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:12:55,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=115 2024-12-02T23:12:55,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779 in 214 msec 2024-12-02T23:12:55,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, ASSIGN in 375 msec 2024-12-02T23:12:55,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=116 2024-12-02T23:12:55,863 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828 in 216 msec 2024-12-02T23:12:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-12-02T23:12:55,865 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, ASSIGN in 376 msec 2024-12-02T23:12:55,866 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:12:55,866 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181175866"}]},"ts":"1733181175866"} 2024-12-02T23:12:55,867 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-02T23:12:55,868 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:12:55,868 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-02T23:12:55,871 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T23:12:55,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:12:55,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:55,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:55,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:55,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T23:12:55,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 438 msec 2024-12-02T23:12:56,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T23:12:56,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T23:12:56,073 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-02T23:12:56,076 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:56,076 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:56,078 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,082 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,087 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,090 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T23:12:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181176090 (current time:1733181176090). 
2024-12-02T23:12:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T23:12:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4dda5a95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:56,091 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@633e4c7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:56,092 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,093 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58842, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:56,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76cdfcfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:56,094 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:56,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,096 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:56,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:12:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,097 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:12:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62ba4a1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:56,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:56,099 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27de5576, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:56,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,100 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:56,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@741f25d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:56,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:56,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,102 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T23:12:56,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:56,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,105 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:56,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:12:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:56,106 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,106 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T23:12:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:12:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T23:12:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T23:12:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T23:12:56,110 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:56,111 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:56,114 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:56,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742096_1272 (size=161) 2024-12-02T23:12:56,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742096_1272 (size=161) 2024-12-02T23:12:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742096_1272 (size=161) 2024-12-02T23:12:56,122 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:56,122 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b}] 2024-12-02T23:12:56,123 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,123 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T23:12:56,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-02T23:12:56,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:56,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 163ac31eb017c11435b0b7ff88739a2b: 2024-12-02T23:12:56,275 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:56,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 3931075a1273d0e31cda2cc53776b03d: 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:56,276 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:12:56,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742098_1274 (size=68) 2024-12-02T23:12:56,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742098_1274 (size=68) 2024-12-02T23:12:56,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742098_1274 (size=68) 2024-12-02T23:12:56,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:56,295 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-02T23:12:56,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-02T23:12:56,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,298 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d in 177 msec 2024-12-02T23:12:56,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742097_1273 (size=68) 2024-12-02T23:12:56,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742097_1273 (size=68) 2024-12-02T23:12:56,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742097_1273 (size=68) 2024-12-02T23:12:56,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 
2024-12-02T23:12:56,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-02T23:12:56,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-02T23:12:56,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,304 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-02T23:12:56,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b in 186 msec 2024-12-02T23:12:56,311 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:56,312 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:56,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:12:56,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:56,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:56,314 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:12:56,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742099_1275 (size=60) 2024-12-02T23:12:56,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742099_1275 (size=60) 2024-12-02T23:12:56,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742099_1275 (size=60) 2024-12-02T23:12:56,339 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:56,339 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-02T23:12:56,340 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-02T23:12:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742100_1276 (size=641) 2024-12-02T23:12:56,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742100_1276 (size=641) 2024-12-02T23:12:56,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742100_1276 (size=641) 2024-12-02T23:12:56,379 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:56,383 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:56,384 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-02T23:12:56,385 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:56,385 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T23:12:56,386 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 278 msec 2024-12-02T23:12:56,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T23:12:56,423 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T23:12:56,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:56,432 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:12:56,433 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-02T23:12:56,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:56,440 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:12:56,442 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,447 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,451 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T23:12:56,453 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T23:12:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181176453 (current time:1733181176453). 2024-12-02T23:12:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:12:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T23:12:56,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:12:56,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2bbc697e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:56,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:56,455 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:56,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:56,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:56,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b391cb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T23:12:56,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:56,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:56,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,457 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58878, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:56,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c6b05ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:56,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:56,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,459 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41348, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:56,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,460 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:56,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@318149fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:12:56,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:12:56,462 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:12:56,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:12:56,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:12:56,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26767482, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,462 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:12:56,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:12:56,463 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,463 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58890, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:12:56,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27ce8e51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:12:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:12:56,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:12:56,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,467 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:56,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:12:56,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:12:56,469 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58990, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:12:56,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:12:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:12:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:12:56,471 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:12:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T23:12:56,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:12:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T23:12:56,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T23:12:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T23:12:56,474 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:12:56,475 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:12:56,478 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:12:56,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742101_1277 (size=156) 2024-12-02T23:12:56,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742101_1277 (size=156) 2024-12-02T23:12:56,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742101_1277 (size=156) 2024-12-02T23:12:56,497 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:12:56,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b}] 2024-12-02T23:12:56,498 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,498 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,514 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-02T23:12:56,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T23:12:56,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-02T23:12:56,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:12:56,651 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 3931075a1273d0e31cda2cc53776b03d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T23:12:56,651 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-02T23:12:56,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:12:56,652 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 163ac31eb017c11435b0b7ff88739a2b 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T23:12:56,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d is 71, key is 07122bab2e02c9c9c2fee00b87c800dd/cf:q/1733181176428/Put/seqid=0 2024-12-02T23:12:56,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b is 71, key is 13b09d7907f69501984835786bb52024/cf:q/1733181176431/Put/seqid=0 2024-12-02T23:12:56,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742102_1278 (size=5102) 2024-12-02T23:12:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742102_1278 (size=5102) 2024-12-02T23:12:56,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742102_1278 (size=5102) 2024-12-02T23:12:56,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
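The SnapshotRegionCallable entries above flush each region's memstore before its store files are referenced, which is what the FLUSH snapshot type implies. For comparison only, the same per-region flush can be requested explicitly through Admin; this sketch is illustrative and not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {                        // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table, producing the same kind of
      // "Flushing ... column families" / "Finished flush of dataSize ..." entries seen above.
      admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}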
2024-12-02T23:12:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742103_1279 (size=8171) 2024-12-02T23:12:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742103_1279 (size=8171) 2024-12-02T23:12:56,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742103_1279 (size=8171) 2024-12-02T23:12:56,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:56,745 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/.tmp/cf/c7d7165c23984f2baea7efe028d36444, store: [table=testtb-testConsecutiveExports family=cf region=3931075a1273d0e31cda2cc53776b03d] 2024-12-02T23:12:56,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/.tmp/cf/c7d7165c23984f2baea7efe028d36444 is 206, key is 0bcb031499421ca6022d82ad598fb6ac8/cf:q/1733181176428/Put/seqid=0 2024-12-02T23:12:56,748 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/.tmp/cf/daf0ab40b25c4ec68d060bb9e0e17dd9, store: [table=testtb-testConsecutiveExports family=cf 
region=163ac31eb017c11435b0b7ff88739a2b] 2024-12-02T23:12:56,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/.tmp/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 is 206, key is 15065db357032bbf97c4016b2b416ca82/cf:q/1733181176431/Put/seqid=0 2024-12-02T23:12:56,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742104_1280 (size=5906) 2024-12-02T23:12:56,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742104_1280 (size=5906) 2024-12-02T23:12:56,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742104_1280 (size=5906) 2024-12-02T23:12:56,753 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/.tmp/cf/c7d7165c23984f2baea7efe028d36444 2024-12-02T23:12:56,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/.tmp/cf/c7d7165c23984f2baea7efe028d36444 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444 2024-12-02T23:12:56,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444, entries=3, sequenceid=6, filesize=5.8 K 2024-12-02T23:12:56,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 3931075a1273d0e31cda2cc53776b03d in 119ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:56,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 3931075a1273d0e31cda2cc53776b03d: 2024-12-02T23:12:56,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. for snaptb0-testConsecutiveExports completed. 2024-12-02T23:12:56,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T23:12:56,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:56,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444] hfiles 2024-12-02T23:12:56,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444 for snapshot=snaptb0-testConsecutiveExports 2024-12-02T23:12:56,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742105_1281 (size=14853) 2024-12-02T23:12:56,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742105_1281 (size=14853) 2024-12-02T23:12:56,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742105_1281 (size=14853) 2024-12-02T23:12:56,777 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/.tmp/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 2024-12-02T23:12:56,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/.tmp/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 2024-12-02T23:12:56,790 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9, entries=47, sequenceid=6, filesize=14.5 K 2024-12-02T23:12:56,791 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 163ac31eb017c11435b0b7ff88739a2b in 139ms, sequenceid=6, compaction requested=false 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 
163ac31eb017c11435b0b7ff88739a2b: 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. for snaptb0-testConsecutiveExports completed. 2024-12-02T23:12:56,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9] hfiles 2024-12-02T23:12:56,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 for snapshot=snaptb0-testConsecutiveExports 2024-12-02T23:12:56,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742106_1282 (size=107) 2024-12-02T23:12:56,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742106_1282 (size=107) 2024-12-02T23:12:56,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742106_1282 (size=107) 2024-12-02T23:12:56,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:12:56,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-02T23:12:56,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-02T23:12:56,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,797 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3931075a1273d0e31cda2cc53776b03d in 304 msec 2024-12-02T23:12:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742107_1283 (size=107) 2024-12-02T23:12:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742107_1283 (size=107) 2024-12-02T23:12:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742107_1283 (size=107) 2024-12-02T23:12:56,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 
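Both SnapshotRegionProcedure children (pid=123 and pid=124) have now finished, while the RPC handler keeps logging "Checking to see if procedure is done pid=122" on behalf of the waiting client. A hedged sketch of that polling from the client side is below; it assumes the single-argument SnapshotDescription constructor and is illustrative, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class WaitForSnapshotSketch {                   // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      SnapshotDescription snapshot = new SnapshotDescription("snaptb0-testConsecutiveExports");
      // Poll the master until the snapshot procedure reports completion; this is the
      // client-side counterpart of the repeated "is procedure done pid=122" checks above.
      while (!admin.isSnapshotFinished(snapshot)) {
        Thread.sleep(500);
      }
      System.out.println("snapshot finished");
    }
  }
}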
2024-12-02T23:12:56,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-02T23:12:56,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-02T23:12:56,823 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,823 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-02T23:12:56,838 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 163ac31eb017c11435b0b7ff88739a2b in 337 msec 2024-12-02T23:12:56,838 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:12:56,839 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:12:56,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
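This SNAPSHOT_SNAPSHOT_MOB_REGION step, together with the earlier HMobStore/DefaultMobStoreFlusher activity under mobdir, exists because the 'cf' family of this table is MOB-enabled, so the snapshot also references the MOB hfiles (the references are collected in the entries that follow). As a hedged illustration of how such a family is declared, not the test's actual table setup; the table name and the 102400-byte threshold are arbitrary example values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {                    // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Values larger than the MOB threshold are written to separate MOB files under mobdir,
      // which is why the snapshot manifest also has to reference mob hfiles.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("example-mob-table"))   // hypothetical table name
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(102400L)                         // example threshold, not from the log
              .build())
          .build());
    }
  }
}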
2024-12-02T23:12:56,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:12:56,854 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:12:56,856 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d] hfiles 2024-12-02T23:12:56,856 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:12:56,856 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:12:56,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742108_1284 (size=291) 2024-12-02T23:12:56,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742108_1284 (size=291) 2024-12-02T23:12:56,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742108_1284 (size=291) 2024-12-02T23:12:57,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T23:12:57,328 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:12:57,328 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-02T23:12:57,330 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T23:12:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742109_1285 (size=951) 2024-12-02T23:12:57,386 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742109_1285 (size=951) 2024-12-02T23:12:57,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742109_1285 (size=951) 2024-12-02T23:12:57,395 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:12:57,405 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:12:57,405 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T23:12:57,408 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:12:57,408 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T23:12:57,410 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 936 msec 2024-12-02T23:12:57,533 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000002/launch_container.sh] 2024-12-02T23:12:57,533 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000002/container_tokens] 2024-12-02T23:12:57,533 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000002/sysfs] 
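At this point procedure 122 has finished and the snapshot has been moved from .hbase-snapshot/.tmp to .hbase-snapshot/snaptb0-testConsecutiveExports; the "delete returned false" warnings just above appear to be unrelated YARN container cleanup from the MiniMRCluster. One simple way to confirm the completed snapshot from a client, shown only as a sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {                     // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Completed snapshots (those already moved out of .tmp) show up here.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}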
2024-12-02T23:12:57,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T23:12:57,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T23:12:57,613 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612 2024-12-02T23:12:57,613 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:57,662 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:12:57,662 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5011d6cb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T23:12:57,664 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
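The test now exports snaptb0-testConsecutiveExports to a local file:// destination. ExportSnapshot is a Hadoop Tool, so the same export can also be driven programmatically; the sketch below uses the documented -snapshot/-copy-to options with a hypothetical destination path and is not the test's own invocation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {                    // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and the referenced hfiles/mob files to the target
    // filesystem, here a local directory (hypothetical path); a MapReduce job does the
    // bulk file copy, as the later log entries show.
    int ret = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/example-local-export"
    });
    System.exit(ret);
  }
}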
2024-12-02T23:12:57,670 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T23:12:57,734 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:57,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:57,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:58,838 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000005/launch_container.sh] 2024-12-02T23:12:58,838 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000005/container_tokens] 2024-12-02T23:12:58,838 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000005/sysfs] 2024-12-02T23:12:58,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-6916575243777872407.jar 2024-12-02T23:12:58,994 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:58,995 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,083 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-1211998095707385706.jar 2024-12-02T23:12:59,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,084 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,085 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:12:59,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:12:59,087 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:12:59,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:12:59,088 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:12:59,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:12:59,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:12:59,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:12:59,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:12:59,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:12:59,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:12:59,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:12:59,090 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:59,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:59,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:59,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:59,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:12:59,091 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:59,092 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:12:59,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742110_1286 (size=131440) 2024-12-02T23:12:59,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742110_1286 (size=131440) 2024-12-02T23:12:59,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742110_1286 (size=131440) 2024-12-02T23:12:59,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T23:12:59,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T23:12:59,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742111_1287 (size=4188619) 2024-12-02T23:12:59,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T23:12:59,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T23:12:59,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742112_1288 (size=1323991) 2024-12-02T23:12:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742113_1289 (size=903926) 2024-12-02T23:12:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742113_1289 (size=903926) 2024-12-02T23:12:59,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742113_1289 (size=903926) 2024-12-02T23:12:59,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T23:12:59,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T23:12:59,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742114_1290 (size=8360083) 2024-12-02T23:12:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742115_1291 (size=6424743) 2024-12-02T23:12:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742115_1291 (size=6424743) 
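The long run of "For class X, using jar Y" entries is TableMapReduceUtil resolving, for each class the export job needs, the jar that contains it so the jar can be shipped with the MapReduce job; the addStoredBlock lines that follow are those jars being written into HDFS for the job. A hedged sketch of the call that produces this, using a hypothetical job name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {                 // illustrative class name
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-export-job");   // hypothetical job name
    // Finds the jar for each required HBase/ZooKeeper/metrics/etc. class and adds it to the
    // job's classpath configuration, producing "For class ..., using jar ..." DEBUG lines.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println(job.getConfiguration().get("tmpjars")); // jars staged for the job
  }
}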
2024-12-02T23:12:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742115_1291 (size=6424743) 2024-12-02T23:12:59,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T23:12:59,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T23:12:59,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742116_1292 (size=1877034) 2024-12-02T23:12:59,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742117_1293 (size=77835) 2024-12-02T23:12:59,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742117_1293 (size=77835) 2024-12-02T23:12:59,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742117_1293 (size=77835) 2024-12-02T23:12:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742118_1294 (size=30949) 2024-12-02T23:12:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742118_1294 (size=30949) 2024-12-02T23:12:59,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742118_1294 (size=30949) 2024-12-02T23:12:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T23:12:59,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T23:12:59,999 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0004_000001 (auth:SIMPLE) from 127.0.0.1:41714 2024-12-02T23:12:59,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742119_1295 (size=1597213) 2024-12-02T23:13:00,049 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000001/launch_container.sh] 2024-12-02T23:13:00,049 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000001/container_tokens] 2024-12-02T23:13:00,049 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0004/container_1733180955226_0004_01_000001/sysfs] 2024-12-02T23:13:00,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T23:13:00,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T23:13:00,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742120_1296 (size=4695811) 2024-12-02T23:13:00,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742121_1297 (size=232957) 2024-12-02T23:13:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742121_1297 (size=232957) 2024-12-02T23:13:00,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742121_1297 (size=232957) 2024-12-02T23:13:00,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742122_1298 (size=127628) 2024-12-02T23:13:00,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742122_1298 (size=127628) 2024-12-02T23:13:00,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742122_1298 (size=127628) 2024-12-02T23:13:00,642 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:13:00,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742123_1299 (size=20406) 2024-12-02T23:13:00,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742123_1299 (size=20406) 2024-12-02T23:13:00,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742123_1299 (size=20406) 2024-12-02T23:13:00,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742124_1300 (size=443172) 2024-12-02T23:13:00,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742124_1300 (size=443172) 2024-12-02T23:13:00,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742124_1300 (size=443172) 2024-12-02T23:13:00,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742125_1301 (size=5175431) 2024-12-02T23:13:00,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742125_1301 (size=5175431) 2024-12-02T23:13:00,899 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742125_1301 (size=5175431) 2024-12-02T23:13:00,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742126_1302 (size=217634) 2024-12-02T23:13:00,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742126_1302 (size=217634) 2024-12-02T23:13:00,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742126_1302 (size=217634) 2024-12-02T23:13:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T23:13:00,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T23:13:00,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742127_1303 (size=1832290) 2024-12-02T23:13:00,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742128_1304 (size=322274) 2024-12-02T23:13:00,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742128_1304 (size=322274) 2024-12-02T23:13:00,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742128_1304 (size=322274) 2024-12-02T23:13:01,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742129_1305 (size=503880) 2024-12-02T23:13:01,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742129_1305 (size=503880) 2024-12-02T23:13:01,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742129_1305 (size=503880) 2024-12-02T23:13:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742130_1306 (size=29229) 2024-12-02T23:13:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742130_1306 (size=29229) 2024-12-02T23:13:01,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742130_1306 (size=29229) 2024-12-02T23:13:02,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742131_1307 (size=24096) 2024-12-02T23:13:02,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742131_1307 (size=24096) 2024-12-02T23:13:02,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742131_1307 (size=24096) 2024-12-02T23:13:02,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742132_1308 (size=111872) 2024-12-02T23:13:02,052 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742132_1308 (size=111872) 2024-12-02T23:13:02,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742132_1308 (size=111872) 2024-12-02T23:13:02,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742133_1309 (size=45609) 2024-12-02T23:13:02,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742133_1309 (size=45609) 2024-12-02T23:13:02,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742133_1309 (size=45609) 2024-12-02T23:13:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742134_1310 (size=136454) 2024-12-02T23:13:02,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742134_1310 (size=136454) 2024-12-02T23:13:02,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742134_1310 (size=136454) 2024-12-02T23:13:02,069 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T23:13:02,071 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T23:13:02,073 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T23:13:02,073 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T23:13:02,073 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T23:13:02,073 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:13:02,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742135_1311 (size=1023) 2024-12-02T23:13:02,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742135_1311 (size=1023) 2024-12-02T23:13:02,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742135_1311 (size=1023) 2024-12-02T23:13:02,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742136_1312 (size=35) 2024-12-02T23:13:02,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742136_1312 (size=35) 2024-12-02T23:13:02,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742136_1312 (size=35) 2024-12-02T23:13:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742137_1313 (size=304041) 2024-12-02T23:13:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742137_1313 (size=304041) 2024-12-02T23:13:02,124 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742137_1313 (size=304041) 2024-12-02T23:13:02,144 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:13:02,144 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:13:02,800 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:37336 2024-12-02T23:13:03,584 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:13:05,205 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T23:13:05,205 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-02T23:13:09,260 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3931075a1273d0e31cda2cc53776b03d changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:13:09,260 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 163ac31eb017c11435b0b7ff88739a2b changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:13:11,089 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:53490 2024-12-02T23:13:11,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742138_1314 (size=349739) 2024-12-02T23:13:11,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742138_1314 (size=349739) 2024-12-02T23:13:11,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742138_1314 (size=349739) 2024-12-02T23:13:13,383 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:40042 2024-12-02T23:13:13,385 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:55118 2024-12-02T23:13:14,195 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:40044 2024-12-02T23:13:14,207 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:55132 2024-12-02T23:13:17,006 WARN [NM Event dispatcher {}] 
containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0005_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:13:20,151 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000002/launch_container.sh] 2024-12-02T23:13:20,152 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000002/container_tokens] 2024-12-02T23:13:20,152 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000002/sysfs] 2024-12-02T23:13:21,043 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000004/launch_container.sh] 2024-12-02T23:13:21,043 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000004/container_tokens] 2024-12-02T23:13:21,043 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000004/sysfs] 2024-12-02T23:13:22,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000005/launch_container.sh] 2024-12-02T23:13:22,133 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000005/container_tokens] 2024-12-02T23:13:22,133 
WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000005/sysfs] 2024-12-02T23:13:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742139_1315 (size=31810) 2024-12-02T23:13:22,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742139_1315 (size=31810) 2024-12-02T23:13:22,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742139_1315 (size=31810) 2024-12-02T23:13:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742140_1316 (size=463) 2024-12-02T23:13:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742140_1316 (size=463) 2024-12-02T23:13:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742140_1316 (size=463) 2024-12-02T23:13:22,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742141_1317 (size=31810) 2024-12-02T23:13:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742141_1317 (size=31810) 2024-12-02T23:13:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742141_1317 (size=31810) 2024-12-02T23:13:22,390 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000003/launch_container.sh] 2024-12-02T23:13:22,390 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000003/container_tokens] 2024-12-02T23:13:22,390 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000003/sysfs] 2024-12-02T23:13:22,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742142_1318 (size=349739) 2024-12-02T23:13:22,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is 
added to blk_1073742142_1318 (size=349739) 2024-12-02T23:13:22,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742142_1318 (size=349739) 2024-12-02T23:13:22,462 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:55392 2024-12-02T23:13:24,440 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:13:24,440 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T23:13:24,445 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T23:13:24,445 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:13:24,445 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:13:24,446 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T23:13:24,447 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T23:13:24,447 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T23:13:24,447 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@5011d6cb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T23:13:24,448 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T23:13:24,448 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T23:13:24,450 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, 
rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:24,496 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:24,496 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5011d6cb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T23:13:24,499 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:13:24,507 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T23:13:24,547 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:24,547 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:24,547 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-9915935603069638697.jar 2024-12-02T23:13:25,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,813 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-10240851877303846746.jar 2024-12-02T23:13:25,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:25,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:13:25,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:13:25,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:13:25,888 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:13:25,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:13:25,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:13:25,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:13:25,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:13:25,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:13:25,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:13:25,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:13:25,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:13:25,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:13:25,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:13:25,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:13:25,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:13:25,891 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:13:25,892 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:13:25,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742143_1319 (size=131440) 2024-12-02T23:13:25,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742143_1319 (size=131440) 2024-12-02T23:13:25,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742143_1319 (size=131440) 2024-12-02T23:13:25,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T23:13:25,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T23:13:25,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742144_1320 (size=4188619) 2024-12-02T23:13:26,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T23:13:26,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T23:13:26,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742145_1321 (size=1323991) 2024-12-02T23:13:26,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742146_1322 (size=903926) 2024-12-02T23:13:26,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742146_1322 (size=903926) 2024-12-02T23:13:26,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742146_1322 (size=903926) 2024-12-02T23:13:26,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T23:13:26,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T23:13:26,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742147_1323 (size=8360083) 2024-12-02T23:13:26,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742148_1324 (size=1877034) 2024-12-02T23:13:26,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742148_1324 (size=1877034) 
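The TableMapReduceUtil(972) entries above show the export job resolving a jar for every HBase/Hadoop class it depends on, after which those jars are staged into HDFS (the addStoredBlock entries that follow). A minimal sketch of how a driver would typically trigger that resolution, assuming a hypothetical job name; this is an illustration of the pattern, not the test's own driver code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    // Hypothetical driver: ships HBase/Hadoop dependency jars with the job,
    // mirroring the "For class ..., using jar ..." entries above.
    public class DependencyJarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-like-job"); // name is illustrative
        // Resolves the jar providing each class the job needs and adds it to
        // the job's distributed-cache jar list.
        TableMapReduceUtil.addDependencyJars(job);
        // The resolved jars are recorded in the job's "tmpjars" setting.
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }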
2024-12-02T23:13:26,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742148_1324 (size=1877034) 2024-12-02T23:13:26,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742149_1325 (size=77835) 2024-12-02T23:13:26,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742149_1325 (size=77835) 2024-12-02T23:13:26,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742149_1325 (size=77835) 2024-12-02T23:13:26,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742150_1326 (size=30949) 2024-12-02T23:13:26,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742150_1326 (size=30949) 2024-12-02T23:13:26,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742150_1326 (size=30949) 2024-12-02T23:13:26,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742151_1327 (size=443172) 2024-12-02T23:13:26,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742151_1327 (size=443172) 2024-12-02T23:13:26,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742151_1327 (size=443172) 2024-12-02T23:13:26,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742152_1328 (size=1597213) 2024-12-02T23:13:26,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742152_1328 (size=1597213) 2024-12-02T23:13:26,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742152_1328 (size=1597213) 2024-12-02T23:13:26,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742153_1329 (size=6424743) 2024-12-02T23:13:26,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742153_1329 (size=6424743) 2024-12-02T23:13:26,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742153_1329 (size=6424743) 2024-12-02T23:13:26,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T23:13:26,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T23:13:26,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742154_1330 (size=4695811) 2024-12-02T23:13:26,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742155_1331 
(size=232957) 2024-12-02T23:13:26,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742155_1331 (size=232957) 2024-12-02T23:13:26,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742155_1331 (size=232957) 2024-12-02T23:13:26,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742156_1332 (size=127628) 2024-12-02T23:13:26,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742156_1332 (size=127628) 2024-12-02T23:13:26,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742156_1332 (size=127628) 2024-12-02T23:13:26,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742157_1333 (size=20406) 2024-12-02T23:13:26,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742157_1333 (size=20406) 2024-12-02T23:13:26,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742157_1333 (size=20406) 2024-12-02T23:13:26,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T23:13:26,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T23:13:26,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742158_1334 (size=5175431) 2024-12-02T23:13:26,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742159_1335 (size=217634) 2024-12-02T23:13:26,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742159_1335 (size=217634) 2024-12-02T23:13:26,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742159_1335 (size=217634) 2024-12-02T23:13:26,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T23:13:26,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T23:13:26,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742160_1336 (size=1832290) 2024-12-02T23:13:26,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742161_1337 (size=322274) 2024-12-02T23:13:26,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742161_1337 (size=322274) 2024-12-02T23:13:26,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to 
blk_1073742161_1337 (size=322274) 2024-12-02T23:13:27,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742162_1338 (size=503880) 2024-12-02T23:13:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742162_1338 (size=503880) 2024-12-02T23:13:27,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742162_1338 (size=503880) 2024-12-02T23:13:27,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742163_1339 (size=29229) 2024-12-02T23:13:27,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742163_1339 (size=29229) 2024-12-02T23:13:27,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742163_1339 (size=29229) 2024-12-02T23:13:27,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742164_1340 (size=24096) 2024-12-02T23:13:27,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742164_1340 (size=24096) 2024-12-02T23:13:27,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742164_1340 (size=24096) 2024-12-02T23:13:27,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742165_1341 (size=111872) 2024-12-02T23:13:27,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742165_1341 (size=111872) 2024-12-02T23:13:27,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742165_1341 (size=111872) 2024-12-02T23:13:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742166_1342 (size=45609) 2024-12-02T23:13:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742166_1342 (size=45609) 2024-12-02T23:13:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742166_1342 (size=45609) 2024-12-02T23:13:27,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742167_1343 (size=136454) 2024-12-02T23:13:27,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742167_1343 (size=136454) 2024-12-02T23:13:27,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742167_1343 (size=136454) 2024-12-02T23:13:27,495 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
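The JobResourceUploader(481) warning just above ("No job jar file set. User classes may not be found.") is the standard MapReduce hint that the submitting code never told the job which jar contains its own classes. A minimal sketch of the usual remedy, with a hypothetical driver class and an illustrative jar path standing in for whatever the real job would use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "job-jar-sketch");
        // Either point at the jar that contains a class from the job...
        job.setJarByClass(JobJarSketch.class);
        // ...or name the jar file explicitly (path is illustrative only):
        // job.setJar("/path/to/my-job.jar");
      }
    }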
2024-12-02T23:13:27,498 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T23:13:27,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T23:13:27,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T23:13:27,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T23:13:27,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:13:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742168_1344 (size=1023) 2024-12-02T23:13:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742168_1344 (size=1023) 2024-12-02T23:13:27,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742168_1344 (size=1023) 2024-12-02T23:13:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742169_1345 (size=35) 2024-12-02T23:13:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742169_1345 (size=35) 2024-12-02T23:13:27,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742169_1345 (size=35) 2024-12-02T23:13:27,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742170_1346 (size=304043) 2024-12-02T23:13:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742170_1346 (size=304043) 2024-12-02T23:13:27,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742170_1346 (size=304043) 2024-12-02T23:13:28,540 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:13:28,540 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:13:28,543 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0005_000001 (auth:SIMPLE) from 127.0.0.1:44916 2024-12-02T23:13:28,554 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000001/launch_container.sh] 2024-12-02T23:13:28,554 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000001/container_tokens] 2024-12-02T23:13:28,554 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0005/container_1733180955226_0005_01_000001/sysfs] 2024-12-02T23:13:29,374 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:55404 2024-12-02T23:13:33,597 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
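The AbstractLeafQueue warnings above ("maximum-am-resource-percent is insufficient to start a single application in queue") refer to the CapacityScheduler setting yarn.scheduler.capacity.maximum-am-resource-percent, which bounds how much of a queue can be held by application masters. A minimal sketch of raising the cluster-wide value for a small test cluster; the 0.5 value is only an example, and per-queue overrides under yarn.scheduler.capacity.<queue-path>.maximum-am-resource-percent exist as well:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class AmResourceSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Allow up to half of the queue's resources for application masters;
        // the small default is what typically triggers the warning above on
        // tiny mini-cluster queues. On a real cluster this property belongs
        // in capacity-scheduler.xml; setting it on a Configuration like this
        // is mainly useful when handing the conf to an in-process test cluster.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
      }
    }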
2024-12-02T23:13:38,986 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:51296 2024-12-02T23:13:39,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742171_1347 (size=349741) 2024-12-02T23:13:39,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742171_1347 (size=349741) 2024-12-02T23:13:39,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742171_1347 (size=349741) 2024-12-02T23:13:40,810 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 163ac31eb017c11435b0b7ff88739a2b, had cached 0 bytes from a total of 14853 2024-12-02T23:13:40,810 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3931075a1273d0e31cda2cc53776b03d, had cached 0 bytes from a total of 5906 2024-12-02T23:13:41,237 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:53966 2024-12-02T23:13:41,237 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:39358 2024-12-02T23:13:42,068 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:34712 2024-12-02T23:13:42,068 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:34698 2024-12-02T23:13:44,555 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0006_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:13:48,163 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000002/launch_container.sh] 2024-12-02T23:13:48,164 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000002/container_tokens] 2024-12-02T23:13:48,164 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000002/sysfs] 2024-12-02T23:13:50,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000004/launch_container.sh] 2024-12-02T23:13:50,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000004/container_tokens] 2024-12-02T23:13:50,723 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000004/sysfs] 2024-12-02T23:13:50,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742172_1348 (size=29743) 2024-12-02T23:13:50,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742172_1348 (size=29743) 2024-12-02T23:13:50,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742172_1348 (size=29743) 2024-12-02T23:13:50,964 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000003/launch_container.sh] 2024-12-02T23:13:50,964 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000003/container_tokens] 2024-12-02T23:13:50,964 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000003/sysfs] 2024-12-02T23:13:50,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742173_1349 (size=463) 2024-12-02T23:13:50,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742173_1349 (size=463) 2024-12-02T23:13:50,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742173_1349 (size=463) 2024-12-02T23:13:51,072 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000005/launch_container.sh] 2024-12-02T23:13:51,072 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000005/container_tokens] 2024-12-02T23:13:51,072 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000005/sysfs] 2024-12-02T23:13:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742174_1350 (size=29743) 2024-12-02T23:13:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742174_1350 (size=29743) 2024-12-02T23:13:51,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742174_1350 (size=29743) 2024-12-02T23:13:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742175_1351 (size=349741) 2024-12-02T23:13:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742175_1351 (size=349741) 2024-12-02T23:13:51,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742175_1351 (size=349741) 2024-12-02T23:13:53,013 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:13:53,013 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
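The ExportSnapshot(1219/1230) entries here mark the finalize-and-verify phase of the second export of snaptb0-testConsecutiveExports, this time to a LocalFileSystem target. A minimal sketch of driving the same tool programmatically through Hadoop's ToolRunner; the option spellings follow the HBase reference guide and the target path is illustrative, so treat both as assumptions to check against your HBase version:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Roughly equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //   -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb0-testConsecutiveExports",
                "-copy-to", "file:///tmp/local-export" // illustrative target only
            });
        System.exit(rc);
      }
    }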
2024-12-02T23:13:53,019 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T23:13:53,020 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:13:53,020 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:13:53,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T23:13:53,022 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T23:13:53,022 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T23:13:53,022 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@5011d6cb in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T23:13:53,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T23:13:53,023 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181177612/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T23:13:53,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testConsecutiveExports 2024-12-02T23:13:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T23:13:53,057 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181233057"}]},"ts":"1733181233057"} 2024-12-02T23:13:53,060 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-02T23:13:53,060 INFO [PEWorker-2 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-02T23:13:53,062 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-02T23:13:53,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, UNASSIGN}] 2024-12-02T23:13:53,066 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, UNASSIGN 2024-12-02T23:13:53,066 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, UNASSIGN 2024-12-02T23:13:53,067 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=163ac31eb017c11435b0b7ff88739a2b, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:13:53,067 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=3931075a1273d0e31cda2cc53776b03d, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:53,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, UNASSIGN because future has completed 2024-12-02T23:13:53,070 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:13:53,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:13:53,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, UNASSIGN because future has completed 2024-12-02T23:13:53,073 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:13:53,073 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:13:53,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T23:13:53,233 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:13:53,233 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:13:53,233 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 163ac31eb017c11435b0b7ff88739a2b, disabling compactions & flushes 2024-12-02T23:13:53,233 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:13:53,233 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:13:53,233 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. after waiting 0 ms 2024-12-02T23:13:53,233 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 2024-12-02T23:13:53,235 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:13:53,235 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:13:53,235 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 3931075a1273d0e31cda2cc53776b03d, disabling compactions & flushes 2024-12-02T23:13:53,235 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:13:53,235 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:13:53,235 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. after waiting 0 ms 2024-12-02T23:13:53,235 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 
2024-12-02T23:13:53,267 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:13:53,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:13:53,268 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d. 2024-12-02T23:13:53,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 3931075a1273d0e31cda2cc53776b03d: Waiting for close lock at 1733181233235Running coprocessor pre-close hooks at 1733181233235Disabling compacts and flushes for region at 1733181233235Disabling writes for close at 1733181233235Writing region close event to WAL at 1733181233253 (+18 ms)Running coprocessor post-close hooks at 1733181233268 (+15 ms)Closed at 1733181233268 2024-12-02T23:13:53,270 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:13:53,272 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=3931075a1273d0e31cda2cc53776b03d, regionState=CLOSED 2024-12-02T23:13:53,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:13:53,282 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:13:53,283 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:13:53,283 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b. 
2024-12-02T23:13:53,283 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 163ac31eb017c11435b0b7ff88739a2b: Waiting for close lock at 1733181233233Running coprocessor pre-close hooks at 1733181233233Disabling compacts and flushes for region at 1733181233233Disabling writes for close at 1733181233233Writing region close event to WAL at 1733181233234 (+1 ms)Running coprocessor post-close hooks at 1733181233283 (+49 ms)Closed at 1733181233283 2024-12-02T23:13:53,287 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:13:53,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=127 2024-12-02T23:13:53,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 3931075a1273d0e31cda2cc53776b03d, server=9e5ab3b623d7,38049,1733180945779 in 210 msec 2024-12-02T23:13:53,289 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=163ac31eb017c11435b0b7ff88739a2b, regionState=CLOSED 2024-12-02T23:13:53,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:13:53,299 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=3931075a1273d0e31cda2cc53776b03d, UNASSIGN in 223 msec 2024-12-02T23:13:53,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-12-02T23:13:53,302 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 163ac31eb017c11435b0b7ff88739a2b, server=9e5ab3b623d7,41311,1733180945828 in 229 msec 2024-12-02T23:13:53,304 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-12-02T23:13:53,304 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=163ac31eb017c11435b0b7ff88739a2b, UNASSIGN in 237 msec 2024-12-02T23:13:53,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-02T23:13:53,307 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 243 msec 2024-12-02T23:13:53,309 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181233309"}]},"ts":"1733181233309"} 2024-12-02T23:13:53,311 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-02T23:13:53,311 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 
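At this point both regions of testtb-testConsecutiveExports have been closed and the table is about to be marked DISABLED in hbase:meta. A minimal client-side sketch of the call that starts this DisableTableProcedure, assuming a standard Connection/Admin setup, might look like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // disableTable blocks until the DisableTableProcedure reaches SUCCESS, which is
      // what the repeated "Checking to see if procedure is done pid=125" polling reflects.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
    }
  }
}
```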
2024-12-02T23:13:53,314 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 259 msec 2024-12-02T23:13:53,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T23:13:53,373 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T23:13:53,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testConsecutiveExports 2024-12-02T23:13:53,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,383 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,384 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-02T23:13:53,392 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-02T23:13:53,395 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:13:53,395 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:13:53,398 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/recovered.edits] 2024-12-02T23:13:53,399 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/recovered.edits] 2024-12-02T23:13:53,404 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/cf/c7d7165c23984f2baea7efe028d36444 2024-12-02T23:13:53,408 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d/recovered.edits/9.seqid 2024-12-02T23:13:53,409 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:13:53,413 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/cf/daf0ab40b25c4ec68d060bb9e0e17dd9 2024-12-02T23:13:53,417 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b/recovered.edits/9.seqid 2024-12-02T23:13:53,417 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testConsecutiveExports/163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:13:53,417 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-02T23:13:53,418 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-02T23:13:53,419 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-02T23:13:53,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T23:13:53,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T23:13:53,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T23:13:53,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T23:13:53,427 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b2024120231ac8bd9072c499292f5e28cac0db700_163ac31eb017c11435b0b7ff88739a2b 2024-12-02T23:13:53,428 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e202412022b2bb0af5f9c433f9c6b2ebdcaa681f3_3931075a1273d0e31cda2cc53776b03d 2024-12-02T23:13:53,429 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-02T23:13:53,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:53,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T23:13:53,435 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:13:53,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T23:13:53,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:53,435 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:13:53,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:53,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T23:13:53,435 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:13:53,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T23:13:53,435 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:13:53,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:53,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-02T23:13:53,451 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,454 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-02T23:13:53,458 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-02T23:13:53,461 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,461 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-02T23:13:53,461 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181233461"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:53,462 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181233461"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:53,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:13:53,464 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3931075a1273d0e31cda2cc53776b03d, NAME => 'testtb-testConsecutiveExports,,1733181175438.3931075a1273d0e31cda2cc53776b03d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 163ac31eb017c11435b0b7ff88739a2b, NAME => 'testtb-testConsecutiveExports,1,1733181175438.163ac31eb017c11435b0b7ff88739a2b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:13:53,464 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
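The DeleteTableProcedure above archives the region and MOB store files, removes the rows from hbase:meta, and drops the table descriptor; the test then deletes its two snapshots by name. A hedged sketch of the corresponding Admin calls (table and snapshot names reused from the log, everything else assumed) follows:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // deleteTable requires the table to be disabled; the server side archives the
      // region and MOB files before clearing hbase:meta, as logged above.
      admin.deleteTable(table);
      // Drop the snapshots taken earlier in the test.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}
```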
2024-12-02T23:13:53,465 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181233464"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:53,467 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-02T23:13:53,469 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T23:13:53,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 96 msec 2024-12-02T23:13:53,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-02T23:13:53,544 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-02T23:13:53,544 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T23:13:53,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T23:13:53,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-02T23:13:53,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T23:13:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-02T23:13:53,614 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=799 (was 800), OpenFileDescriptor=781 (was 798), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1154 (was 929) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=1832 (was 1721) - AvailableMemoryMB LEAK? 
- 2024-12-02T23:13:53,614 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-02T23:13:53,639 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=799, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=1154, ProcessCount=19, AvailableMemoryMB=1829 2024-12-02T23:13:53,640 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=799 is superior to 500 2024-12-02T23:13:53,642 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:13:53,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:53,660 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:13:53,660 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-02T23:13:53,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T23:13:53,661 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:13:53,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742176_1352 (size=458) 2024-12-02T23:13:53,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742176_1352 (size=458) 2024-12-02T23:13:53,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742176_1352 (size=458) 2024-12-02T23:13:53,694 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 65f1f4f527867ff1a6f9bc6f8478cb64, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:53,696 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4adc1b05c211d1eb3660387c9531d6b5, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:53,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T23:13:53,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742177_1353 (size=83) 2024-12-02T23:13:53,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742177_1353 (size=83) 2024-12-02T23:13:53,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742177_1353 (size=83) 2024-12-02T23:13:53,783 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:53,784 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 65f1f4f527867ff1a6f9bc6f8478cb64, disabling compactions & flushes 2024-12-02T23:13:53,784 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:53,784 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:53,784 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
after waiting 0 ms 2024-12-02T23:13:53,784 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:53,784 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:53,784 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 65f1f4f527867ff1a6f9bc6f8478cb64: Waiting for close lock at 1733181233783Disabling compacts and flushes for region at 1733181233783Disabling writes for close at 1733181233784 (+1 ms)Writing region close event to WAL at 1733181233784Closed at 1733181233784 2024-12-02T23:13:53,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742178_1354 (size=83) 2024-12-02T23:13:53,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742178_1354 (size=83) 2024-12-02T23:13:53,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742178_1354 (size=83) 2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing 4adc1b05c211d1eb3660387c9531d6b5, disabling compactions & flushes 2024-12-02T23:13:53,787 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. after waiting 0 ms 2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:53,787 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 
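The CreateTableProcedure entries around here build a table whose single column family 'cf' has IS_MOB => 'true' and MOB_THRESHOLD => '0', so every cell is written as a MOB file (hence the mobdir paths seen earlier). A sketch of constructing an equivalent descriptor with the 2.x builder API is shown below; the split key is inferred from the STARTKEY/ENDKEY pairs in the log, and all other attributes are left at their defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // MOB enabled with threshold 0 forces all values in 'cf' out to MOB files.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(cf)
          .build();
      // A single split key of '1' yields the two regions seen in the log:
      // one with STARTKEY '' / ENDKEY '1' and one with STARTKEY '1' / ENDKEY ''.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(td, splitKeys);
    }
  }
}
```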
2024-12-02T23:13:53,787 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4adc1b05c211d1eb3660387c9531d6b5: Waiting for close lock at 1733181233787Disabling compacts and flushes for region at 1733181233787Disabling writes for close at 1733181233787Writing region close event to WAL at 1733181233787Closed at 1733181233787 2024-12-02T23:13:53,789 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:13:53,789 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733181233789"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181233789"}]},"ts":"1733181233789"} 2024-12-02T23:13:53,789 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733181233789"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181233789"}]},"ts":"1733181233789"} 2024-12-02T23:13:53,792 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:13:53,793 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:13:53,794 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181233793"}]},"ts":"1733181233793"} 2024-12-02T23:13:53,795 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-02T23:13:53,796 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:13:53,799 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:13:53,799 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:13:53,799 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:13:53,799 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:13:53,799 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, ASSIGN}] 2024-12-02T23:13:53,801 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, ASSIGN 2024-12-02T23:13:53,802 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:13:53,803 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, ASSIGN 2024-12-02T23:13:53,819 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:13:53,953 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:13:53,956 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=65f1f4f527867ff1a6f9bc6f8478cb64, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:53,957 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=4adc1b05c211d1eb3660387c9531d6b5, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:13:53,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, ASSIGN because future has completed 2024-12-02T23:13:53,964 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:13:53,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, ASSIGN because future has completed 2024-12-02T23:13:53,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:13:53,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T23:13:54,156 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:54,156 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 4adc1b05c211d1eb3660387c9531d6b5, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:13:54,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. service=AccessControlService 2024-12-02T23:13:54,157 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:13:54,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:54,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,157 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,161 INFO [StoreOpener-4adc1b05c211d1eb3660387c9531d6b5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,162 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:54,162 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 65f1f4f527867ff1a6f9bc6f8478cb64, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:13:54,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. service=AccessControlService 2024-12-02T23:13:54,163 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:13:54,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:54,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,163 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,172 INFO [StoreOpener-4adc1b05c211d1eb3660387c9531d6b5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4adc1b05c211d1eb3660387c9531d6b5 columnFamilyName cf 2024-12-02T23:13:54,173 INFO [StoreOpener-65f1f4f527867ff1a6f9bc6f8478cb64-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,174 DEBUG [StoreOpener-4adc1b05c211d1eb3660387c9531d6b5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:54,174 INFO [StoreOpener-65f1f4f527867ff1a6f9bc6f8478cb64-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 65f1f4f527867ff1a6f9bc6f8478cb64 columnFamilyName cf 2024-12-02T23:13:54,185 INFO [StoreOpener-4adc1b05c211d1eb3660387c9531d6b5-1 {}] regionserver.HStore(327): Store=4adc1b05c211d1eb3660387c9531d6b5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
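The remaining entries trace the two OpenRegionProcedure handlers bringing 4adc1b05c211d1eb3660387c9531d6b5 and 65f1f4f527867ff1a6f9bc6f8478cb64 online on their assigned region servers. From a client's perspective this is typically observed by polling table availability and then reading the region locations, roughly as in the sketch below (the polling interval and output format are arbitrary choices, not part of the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class TableAvailabilitySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Wait until both regions opened by the AssignRegionHandler threads are online.
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);
      }
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName());
      }
    }
  }
}
```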
2024-12-02T23:13:54,186 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,187 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,188 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,188 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,188 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,190 DEBUG [StoreOpener-65f1f4f527867ff1a6f9bc6f8478cb64-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:54,190 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,190 INFO [StoreOpener-65f1f4f527867ff1a6f9bc6f8478cb64-1 {}] regionserver.HStore(327): Store=65f1f4f527867ff1a6f9bc6f8478cb64/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:13:54,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,192 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,193 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,195 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 
65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,198 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:13:54,198 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 4adc1b05c211d1eb3660387c9531d6b5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69195235, jitterRate=0.03108935058116913}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:13:54,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,199 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 4adc1b05c211d1eb3660387c9531d6b5: Running coprocessor pre-open hook at 1733181234157Writing region info on filesystem at 1733181234157Initializing all the Stores at 1733181234158 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181234159 (+1 ms)Cleaning up temporary data from old regions at 1733181234188 (+29 ms)Running coprocessor post-open hooks at 1733181234199 (+11 ms)Region opened successfully at 1733181234199 2024-12-02T23:13:54,201 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5., pid=135, masterSystemTime=1733181234151 2024-12-02T23:13:54,202 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:13:54,203 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 65f1f4f527867ff1a6f9bc6f8478cb64; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71200017, jitterRate=0.06096293032169342}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:13:54,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,203 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 65f1f4f527867ff1a6f9bc6f8478cb64: Running 
coprocessor pre-open hook at 1733181234163Writing region info on filesystem at 1733181234163Initializing all the Stores at 1733181234172 (+9 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181234172Cleaning up temporary data from old regions at 1733181234193 (+21 ms)Running coprocessor post-open hooks at 1733181234203 (+10 ms)Region opened successfully at 1733181234203 2024-12-02T23:13:54,204 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:54,204 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:54,204 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64., pid=136, masterSystemTime=1733181234159 2024-12-02T23:13:54,204 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=4adc1b05c211d1eb3660387c9531d6b5, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:13:54,207 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:13:54,207 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:54,207 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
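The region open journals above echo the column-family descriptor the table was created with (NAME 'cf', VERSIONS 1, IS_MOB 'true', MOB_THRESHOLD 0, BLOOMFILTER ROW, BLOCKSIZE 64 KB, no compression or encoding) and the two regions split at '1'. A minimal sketch of building an equivalent table through the public Admin API follows; the table and family names come from the log, while the exact builder calls are an illustrative reconstruction rather than the test's own code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTableSketch {
    public static void create(Admin admin) throws Exception {
        // Family settings mirror the descriptor printed in the region open journal.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setMobEnabled(true)        // IS_MOB => 'true'
                .setMobThreshold(0L)        // MOB_THRESHOLD => '0'
                .setBloomFilterType(BloomType.ROW)
                .setBlocksize(64 * 1024)
                .build();
        TableDescriptor table = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
                .setColumnFamily(cf)
                .build();
        // One split key ("1") yields the two regions seen above: [, 1) and [1, ).
        admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
}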
2024-12-02T23:13:54,208 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=65f1f4f527867ff1a6f9bc6f8478cb64, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:54,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:13:54,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-02T23:13:54,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828 in 244 msec 2024-12-02T23:13:54,214 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, ASSIGN in 412 msec 2024-12-02T23:13:54,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-02T23:13:54,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779 in 231 msec 2024-12-02T23:13:54,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-12-02T23:13:54,218 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, ASSIGN in 416 msec 2024-12-02T23:13:54,219 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:13:54,219 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181234219"}]},"ts":"1733181234219"} 2024-12-02T23:13:54,221 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-02T23:13:54,227 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:13:54,227 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-02T23:13:54,233 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T23:13:54,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:54,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:54,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:54,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:54,268 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:54,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:54,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:54,269 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:54,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 626 msec 2024-12-02T23:13:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T23:13:54,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T23:13:54,293 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,296 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,296 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
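The PermissionStorage and ZKPermissionWatcher records show the owner 'jenkins' being stored with RWXCA on the new table and the permission-cache update fanning out to the master and every region server watcher. A hedged sketch of the equivalent client-side grant via AccessControlClient is below; the user, table and actions mirror the log, but whether the test issues the grant this way is an assumption. Passing null for family and qualifier scopes the grant to the whole table, matching the table-level entry read back at PermissionStorage(613).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissionsSketch {
    public static void grantAll(Connection connection) throws Throwable {
        // RWXCA as logged: READ, WRITE, EXEC, CREATE, ADMIN for user "jenkins".
        AccessControlClient.grant(connection,
                TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
                "jenkins",
                null,  // all column families
                null,  // all qualifiers
                Permission.Action.READ,
                Permission.Action.WRITE,
                Permission.Action.EXEC,
                Permission.Action.CREATE,
                Permission.Action.ADMIN);
    }
}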
2024-12-02T23:13:54,297 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:13:54,298 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,325 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,336 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,345 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T23:13:54,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181234345 (current time:1733181234345). 2024-12-02T23:13:54,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:13:54,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T23:13:54,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:13:54,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d57fc3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:54,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:54,352 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:54,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:54,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:54,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62072e66, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:54,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:54,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,354 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48588, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:54,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f1bced8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:54,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:54,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,360 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39956, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
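The ClusterIdFetcher, ConnectionRegistryRpcStubHolder and ConnectionUtils records above are the bootstrap chatter of a short-lived client connection: fetch the cluster id from the master registry, build ClientMetaService stubs, then locate hbase:meta. A minimal sketch of the client call that produces that sequence, assuming the standard ConnectionFactory API and the mini-cluster ZooKeeper endpoint reported by the ZKWatcher lines:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientBootstrapSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Endpoint as reported by the ZKWatcher lines (quorum=127.0.0.1:57019, test mini-cluster).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57019");
        // Creating the connection performs the cluster-id fetch and meta lookup
        // seen in the ClusterIdFetcher / ConnectionUtils records.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
    }
}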
2024-12-02T23:13:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,363 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:13:54,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166c5635, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:54,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:54,367 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:54,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:54,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:54,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74730174, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-02T23:13:54,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:54,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:54,369 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:54,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a3a71f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:54,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:54,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:13:54,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,377 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,378 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:13:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T23:13:54,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:13:54,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T23:13:54,383 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:13:54,384 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:13:54,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T23:13:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T23:13:54,388 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
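From the snapshot request onward, the master validates the description (creation time, TTL, VERSION, owner), re-reads the table ACL, and then drives SnapshotProcedure pid=137 through SNAPSHOT_PREPARE and SNAPSHOT_PRE_OPERATION. A hedged sketch of the client call that starts such a FLUSH-type snapshot is below; the name and type mirror the log, but the exact Admin overload the test uses is an assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
    public static void take(Admin admin) throws Exception {
        // FLUSH-type snapshot, mirroring "{ ss=emptySnaptb0-... type=FLUSH ttl=0 }".
        admin.snapshot(new SnapshotDescription(
                "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
                TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
                SnapshotType.FLUSH));
    }
}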
2024-12-02T23:13:54,391 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:13:54,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742179_1355 (size=215) 2024-12-02T23:13:54,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742179_1355 (size=215) 2024-12-02T23:13:54,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742179_1355 (size=215) 2024-12-02T23:13:54,430 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:13:54,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5}] 2024-12-02T23:13:54,432 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,432 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T23:13:54,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-02T23:13:54,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for 4adc1b05c211d1eb3660387c9531d6b5: 2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 65f1f4f527867ff1a6f9bc6f8478cb64: 2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T23:13:54,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:13:54,585 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:13:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742180_1356 (size=86) 2024-12-02T23:13:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742180_1356 (size=86) 2024-12-02T23:13:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742180_1356 (size=86) 2024-12-02T23:13:54,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 
2024-12-02T23:13:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742181_1357 (size=86) 2024-12-02T23:13:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742181_1357 (size=86) 2024-12-02T23:13:54,616 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-02T23:13:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742181_1357 (size=86) 2024-12-02T23:13:54,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-02T23:13:54,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:54,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-02T23:13:54,617 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-02T23:13:54,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,618 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 in 189 msec 2024-12-02T23:13:54,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-02T23:13:54,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 in 189 msec 2024-12-02T23:13:54,621 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:13:54,622 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:13:54,623 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T23:13:54,623 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:13:54,623 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:54,623 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:13:54,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742182_1358 (size=78) 2024-12-02T23:13:54,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742182_1358 (size=78) 2024-12-02T23:13:54,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742182_1358 (size=78) 2024-12-02T23:13:54,636 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:13:54,636 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,637 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742183_1359 (size=713) 2024-12-02T23:13:54,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742183_1359 (size=713) 2024-12-02T23:13:54,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742183_1359 (size=713) 2024-12-02T23:13:54,668 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:13:54,672 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:13:54,673 
DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,674 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:13:54,674 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T23:13:54,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 295 msec 2024-12-02T23:13:54,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T23:13:54,702 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T23:13:54,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:13:54,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:13:54,720 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,725 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:54,725 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
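The HRegion(8528) lines record puts arriving with the WAL disabled ("Data may be lost in the event of a crash"), i.e. the test loads rows into both regions with durability SKIP_WAL before requesting the second snapshot. A hedged sketch of such a write follows; the row key, qualifier and value are placeholders, and only the table and family names come from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
    public static void write(Connection connection) throws Exception {
        TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Table table = connection.getTable(name)) {
            Put put = new Put(Bytes.toBytes("row-0"));               // placeholder row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // family from the log, qualifier assumed
                    Bytes.toBytes("value-0"));
            // SKIP_WAL triggers the "writing data ... with WAL disabled" warning above.
            put.setDurability(Durability.SKIP_WAL);
            table.put(put);
        }
    }
}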
2024-12-02T23:13:54,726 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:13:54,727 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,733 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,739 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T23:13:54,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T23:13:54,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181234741 (current time:1733181234741). 2024-12-02T23:13:54,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:13:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T23:13:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:13:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6316cb59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:54,743 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:54,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:54,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:54,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a83165e, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:54,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:54,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,745 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:54,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24eb28d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:54,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:54,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,748 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,749 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:13:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,750 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:13:54,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31e10b1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:54,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:54,751 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:54,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:54,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:54,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4004368a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:54,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:54,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,753 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:54,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c051d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:54,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:54,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:54,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,756 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:13:54,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:54,758 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:54,759 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:13:54,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:54,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:54,759 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:13:54,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T23:13:54,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:13:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T23:13:54,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T23:13:54,763 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:13:54,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T23:13:54,764 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:13:54,768 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:13:54,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742184_1360 (size=210) 2024-12-02T23:13:54,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742184_1360 (size=210) 2024-12-02T23:13:54,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742184_1360 (size=210) 2024-12-02T23:13:54,781 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:13:54,781 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5}] 2024-12-02T23:13:54,782 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:54,783 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:54,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T23:13:54,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-02T23:13:54,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:13:54,937 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing 4adc1b05c211d1eb3660387c9531d6b5 1/1 column families, dataSize=2.74 KB heapSize=6.16 KB 2024-12-02T23:13:54,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-02T23:13:54,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:13:54,939 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 65f1f4f527867ff1a6f9bc6f8478cb64 1/1 column families, dataSize=534 B heapSize=1.38 KB 2024-12-02T23:13:54,983 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 is 71, key is 00906fc2bcdb918ee5328fcaa429c2d1/cf:q/1733181234713/Put/seqid=0 2024-12-02T23:13:55,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 is 71, key is 182eaf82fd1bebee43a672d9b08c06e4/cf:q/1733181234719/Put/seqid=0 2024-12-02T23:13:55,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742185_1361 (size=5451) 2024-12-02T23:13:55,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742185_1361 (size=5451) 2024-12-02T23:13:55,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742185_1361 (size=5451) 2024-12-02T23:13:55,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:55,037 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:55,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/.tmp/cf/1c6b88aea3cf4d9aa85367b253acbe1a, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=65f1f4f527867ff1a6f9bc6f8478cb64] 2024-12-02T23:13:55,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/.tmp/cf/1c6b88aea3cf4d9aa85367b253acbe1a is 224, key is 0924b9986598971b224bcc339730f309d/cf:q/1733181234713/Put/seqid=0 2024-12-02T23:13:55,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742187_1363 (size=7084) 2024-12-02T23:13:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742187_1363 (size=7084) 2024-12-02T23:13:55,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742187_1363 (size=7084) 2024-12-02T23:13:55,060 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=534, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/.tmp/cf/1c6b88aea3cf4d9aa85367b253acbe1a 2024-12-02T23:13:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742186_1362 (size=7822) 2024-12-02T23:13:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742186_1362 (size=7822) 2024-12-02T23:13:55,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742186_1362 (size=7822) 2024-12-02T23:13:55,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/.tmp/cf/1c6b88aea3cf4d9aa85367b253acbe1a as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a 2024-12-02T23:13:55,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T23:13:55,110 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a, entries=8, sequenceid=6, filesize=6.9 K 2024-12-02T23:13:55,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~534 B/534, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 65f1f4f527867ff1a6f9bc6f8478cb64 in 172ms, sequenceid=6, compaction requested=false 2024-12-02T23:13:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-02T23:13:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 65f1f4f527867ff1a6f9bc6f8478cb64: 2024-12-02T23:13:55,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T23:13:55,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:13:55,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a] hfiles 2024-12-02T23:13:55,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742188_1364 (size=125) 2024-12-02T23:13:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742188_1364 (size=125) 2024-12-02T23:13:55,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742188_1364 (size=125) 2024-12-02T23:13:55,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T23:13:55,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T23:13:55,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T23:13:55,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:55,470 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:55,472 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/.tmp/cf/398bffd80ce648fc8117390b4a27780e, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=4adc1b05c211d1eb3660387c9531d6b5] 2024-12-02T23:13:55,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/.tmp/cf/398bffd80ce648fc8117390b4a27780e is 224, key is 12f6f55271e89409f11d038029a0fb0f9/cf:q/1733181234719/Put/seqid=0 2024-12-02T23:13:55,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742189_1365 (size=14619) 2024-12-02T23:13:55,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742189_1365 (size=14619) 2024-12-02T23:13:55,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742189_1365 (size=14619) 2024-12-02T23:13:55,547 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/.tmp/cf/398bffd80ce648fc8117390b4a27780e 2024-12-02T23:13:55,554 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/.tmp/cf/398bffd80ce648fc8117390b4a27780e as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e 2024-12-02T23:13:55,559 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
2024-12-02T23:13:55,559 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-02T23:13:55,559 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e, entries=42, sequenceid=6, filesize=14.3 K 2024-12-02T23:13:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-02T23:13:55,560 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:55,560 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:55,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~2.74 KB/2802, heapSize ~6.14 KB/6288, currentSize=0 B/0 for 4adc1b05c211d1eb3660387c9531d6b5 in 625ms, sequenceid=6, compaction requested=false 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for 4adc1b05c211d1eb3660387c9531d6b5: 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e] hfiles 2024-12-02T23:13:55,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64 in 781 msec 2024-12-02T23:13:55,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742190_1366 (size=125) 2024-12-02T23:13:55,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742190_1366 (size=125) 2024-12-02T23:13:55,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742190_1366 (size=125) 2024-12-02T23:13:55,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 
2024-12-02T23:13:55,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-02T23:13:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-02T23:13:55,611 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:55,612 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:55,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-02T23:13:55,616 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5 in 832 msec 2024-12-02T23:13:55,616 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:13:55,617 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:13:55,618 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:13:55,618 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:13:55,619 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:55,620 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64] hfiles 2024-12-02T23:13:55,620 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:13:55,620 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:13:55,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742191_1367 (size=309) 2024-12-02T23:13:55,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742191_1367 (size=309) 2024-12-02T23:13:55,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742191_1367 (size=309) 2024-12-02T23:13:55,671 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:13:55,671 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,671 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742192_1368 (size=1023) 2024-12-02T23:13:55,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45149 is added to blk_1073742192_1368 (size=1023) 2024-12-02T23:13:55,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742192_1368 (size=1023) 2024-12-02T23:13:55,724 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:13:55,736 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:13:55,736 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:13:55,739 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:13:55,739 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T23:13:55,741 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 978 msec 2024-12-02T23:13:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T23:13:55,912 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T23:13:55,914 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:13:55,916 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:13:55,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:13:55,921 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T23:13:55,924 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56694, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:13:55,932 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39986, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T23:13:55,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:13:55,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:55,948 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:13:55,948 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:55,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-02T23:13:55,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T23:13:55,953 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:13:56,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742193_1369 (size=399) 2024-12-02T23:13:56,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742193_1369 (size=399) 2024-12-02T23:13:56,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742193_1369 (size=399) 2024-12-02T23:13:56,038 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 116bbb1f3ad85fbca43dabb0569f488d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:56,042 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d334bb82bb6c41cacabbb40f758ed67f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:56,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T23:13:56,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742194_1370 (size=85) 2024-12-02T23:13:56,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742194_1370 (size=85) 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 116bbb1f3ad85fbca43dabb0569f488d, disabling compactions & flushes 2024-12-02T23:13:56,117 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. after waiting 0 ms 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 
2024-12-02T23:13:56,117 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,117 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 116bbb1f3ad85fbca43dabb0569f488d: Waiting for close lock at 1733181236117Disabling compacts and flushes for region at 1733181236117Disabling writes for close at 1733181236117Writing region close event to WAL at 1733181236117Closed at 1733181236117 2024-12-02T23:13:56,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742194_1370 (size=85) 2024-12-02T23:13:56,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742195_1371 (size=85) 2024-12-02T23:13:56,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742195_1371 (size=85) 2024-12-02T23:13:56,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742195_1371 (size=85) 2024-12-02T23:13:56,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing d334bb82bb6c41cacabbb40f758ed67f, disabling compactions & flushes 2024-12-02T23:13:56,527 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. after waiting 0 ms 2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,527 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 
2024-12-02T23:13:56,527 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for d334bb82bb6c41cacabbb40f758ed67f: Waiting for close lock at 1733181236527Disabling compacts and flushes for region at 1733181236527Disabling writes for close at 1733181236527Writing region close event to WAL at 1733181236527Closed at 1733181236527 2024-12-02T23:13:56,528 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:13:56,528 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733181236528"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181236528"}]},"ts":"1733181236528"} 2024-12-02T23:13:56,528 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733181236528"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181236528"}]},"ts":"1733181236528"} 2024-12-02T23:13:56,531 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:13:56,532 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:13:56,532 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181236532"}]},"ts":"1733181236532"} 2024-12-02T23:13:56,533 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-02T23:13:56,534 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:13:56,535 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:13:56,535 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:13:56,535 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:13:56,535 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:13:56,536 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, ASSIGN}] 2024-12-02T23:13:56,537 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, ASSIGN 2024-12-02T23:13:56,537 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, ASSIGN 2024-12-02T23:13:56,539 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:13:56,539 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:13:56,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T23:13:56,690 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:13:56,690 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=d334bb82bb6c41cacabbb40f758ed67f, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:56,690 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=116bbb1f3ad85fbca43dabb0569f488d, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:13:56,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, ASSIGN because future has completed 2024-12-02T23:13:56,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:13:56,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, ASSIGN because future has completed 2024-12-02T23:13:56,699 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:13:56,854 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,854 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => d334bb82bb6c41cacabbb40f758ed67f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.', STARTKEY => '2', ENDKEY => ''} 2024-12-02T23:13:56,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. service=AccessControlService 2024-12-02T23:13:56,855 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:13:56,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:56,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,855 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,857 INFO [StoreOpener-d334bb82bb6c41cacabbb40f758ed67f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,862 INFO [StoreOpener-d334bb82bb6c41cacabbb40f758ed67f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d334bb82bb6c41cacabbb40f758ed67f columnFamilyName cf 2024-12-02T23:13:56,862 DEBUG [StoreOpener-d334bb82bb6c41cacabbb40f758ed67f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:56,863 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,863 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 116bbb1f3ad85fbca43dabb0569f488d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.', STARTKEY => '', ENDKEY => '2'} 2024-12-02T23:13:56,863 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 
service=AccessControlService 2024-12-02T23:13:56,863 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:13:56,864 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,864 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:56,864 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,864 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,865 INFO [StoreOpener-116bbb1f3ad85fbca43dabb0569f488d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,865 INFO [StoreOpener-d334bb82bb6c41cacabbb40f758ed67f-1 {}] regionserver.HStore(327): Store=d334bb82bb6c41cacabbb40f758ed67f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:13:56,865 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,866 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,866 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,867 INFO [StoreOpener-116bbb1f3ad85fbca43dabb0569f488d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, 
compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 116bbb1f3ad85fbca43dabb0569f488d columnFamilyName cf 2024-12-02T23:13:56,867 DEBUG [StoreOpener-116bbb1f3ad85fbca43dabb0569f488d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:56,867 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,867 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,867 INFO [StoreOpener-116bbb1f3ad85fbca43dabb0569f488d-1 {}] regionserver.HStore(327): Store=116bbb1f3ad85fbca43dabb0569f488d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:13:56,868 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,869 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,869 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,869 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,869 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,870 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,872 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:13:56,872 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened d334bb82bb6c41cacabbb40f758ed67f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68554049, jitterRate=0.021534934639930725}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:13:56,872 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 
{event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:56,873 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for d334bb82bb6c41cacabbb40f758ed67f: Running coprocessor pre-open hook at 1733181236855Writing region info on filesystem at 1733181236855Initializing all the Stores at 1733181236856 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181236856Cleaning up temporary data from old regions at 1733181236867 (+11 ms)Running coprocessor post-open hooks at 1733181236872 (+5 ms)Region opened successfully at 1733181236873 (+1 ms) 2024-12-02T23:13:56,873 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,875 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:13:56,875 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 116bbb1f3ad85fbca43dabb0569f488d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67909485, jitterRate=0.011930182576179504}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:13:56,875 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:56,875 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 116bbb1f3ad85fbca43dabb0569f488d: Running coprocessor pre-open hook at 1733181236864Writing region info on filesystem at 1733181236864Initializing all the Stores at 1733181236865 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181236865Cleaning up temporary data from old regions at 1733181236870 (+5 ms)Running coprocessor post-open hooks at 1733181236875 (+5 ms)Region opened successfully at 1733181236875 2024-12-02T23:13:56,878 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d., pid=147, masterSystemTime=1733181236851 2024-12-02T23:13:56,882 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 
{event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f., pid=146, masterSystemTime=1733181236851 2024-12-02T23:13:56,891 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,891 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:56,894 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,894 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:56,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=116bbb1f3ad85fbca43dabb0569f488d, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:13:56,898 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=d334bb82bb6c41cacabbb40f758ed67f, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:56,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:13:56,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:13:56,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-12-02T23:13:56,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622 in 202 msec 2024-12-02T23:13:56,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-12-02T23:13:56,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779 in 203 msec 2024-12-02T23:13:56,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, ASSIGN in 369 msec 2024-12-02T23:13:56,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=145, resume processing ppid=143 2024-12-02T23:13:56,909 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, ASSIGN in 370 msec 2024-12-02T23:13:56,910 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:13:56,910 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181236910"}]},"ts":"1733181236910"} 2024-12-02T23:13:56,912 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-02T23:13:56,913 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:13:56,913 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-02T23:13:56,917 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T23:13:56,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:56,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:56,920 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,923 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,924 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 982 msec 2024-12-02T23:13:56,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:13:56,927 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:56,927 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T23:13:57,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T23:13:57,094 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T23:13:57,099 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d., hostname=9e5ab3b623d7,39847,1733180945622, seqNum=2] 2024-12-02T23:13:57,108 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f., hostname=9e5ab3b623d7,38049,1733180945779, seqNum=2] 2024-12-02T23:13:57,111 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-02T23:13:57,143 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$2(2278): 
Client=jenkins//172.17.0.3 merge regions [116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f] 2024-12-02T23:13:57,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], force=true 2024-12-02T23:13:57,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], force=true 2024-12-02T23:13:57,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], force=true 2024-12-02T23:13:57,151 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], force=true 2024-12-02T23:13:57,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:57,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, UNASSIGN}] 2024-12-02T23:13:57,162 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, UNASSIGN 2024-12-02T23:13:57,163 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, UNASSIGN 2024-12-02T23:13:57,163 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=116bbb1f3ad85fbca43dabb0569f488d, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:13:57,174 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=d334bb82bb6c41cacabbb40f758ed67f, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:13:57,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, UNASSIGN because future has completed 2024-12-02T23:13:57,174 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T23:13:57,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:13:57,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, UNASSIGN because future has completed 2024-12-02T23:13:57,181 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T23:13:57,181 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:13:57,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:57,331 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:57,331 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T23:13:57,332 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 116bbb1f3ad85fbca43dabb0569f488d, disabling compactions & flushes 2024-12-02T23:13:57,332 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:57,332 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 2024-12-02T23:13:57,332 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. after waiting 0 ms 2024-12-02T23:13:57,332 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 
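The merge requested above (Client=jenkins merge regions [116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], stored as MergeTableRegionsProcedure pid=148 with force=true) corresponds to a client-side Admin call roughly like the following hedged sketch; it uses the public HBase 2.x API but is not the test's actual code:

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Merges the two regions of the table into one, mirroring the force=true
    // MergeTableRegionsProcedure logged above. Assumes an already-open Admin.
    static void mergeBothRegions(Admin admin, TableName tn) throws Exception {
      List<RegionInfo> regions = admin.getRegions(tn);   // two regions in this scenario
      byte[][] toMerge = new byte[][] {
        regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes()
      };
      // The forcible flag corresponds to force=true in the procedure above;
      // get() blocks until the master-side procedure completes.
      admin.mergeRegionsAsync(toMerge, true).get();
    }

On the master this expands into the UNASSIGN subprocedures (pid=149/150) and their CloseRegionProcedure children (pid=151/152) whose execution the following entries record.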
2024-12-02T23:13:57,332 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 116bbb1f3ad85fbca43dabb0569f488d 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T23:13:57,335 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:57,335 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T23:13:57,335 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing d334bb82bb6c41cacabbb40f758ed67f, disabling compactions & flushes 2024-12-02T23:13:57,335 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:57,335 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:57,336 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. after waiting 0 ms 2024-12-02T23:13:57,336 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 
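Each parent region flushes 24 B, a single cell, while closing; the cell keys visible in the flush output below ("1/cf:" and "2/cf:") indicate one empty-qualifier Put per region into family cf. A hedged sketch of writes of that shape (row keys match the log, values are purely illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // One-cell puts: row '1' lands in region ['', '2'), row '2' in ['2', '').
    static void writeOneCellPerRegion(Connection conn, TableName tn) throws Exception {
      try (Table table = conn.getTable(tn)) {
        table.put(new Put(Bytes.toBytes("1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v1")));
        table.put(new Put(Bytes.toBytes("2"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v2")));
      }
    }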
2024-12-02T23:13:57,336 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing d334bb82bb6c41cacabbb40f758ed67f 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T23:13:57,363 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/.tmp/cf/ef849f790d2c4bb09cfd9c80efd6bf3e is 28, key is 2/cf:/1733181237110/Put/seqid=0 2024-12-02T23:13:57,376 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/.tmp/cf/c4cf533835024c56a3d7c15458aa628b is 28, key is 1/cf:/1733181237101/Put/seqid=0 2024-12-02T23:13:57,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742197_1373 (size=4945) 2024-12-02T23:13:57,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742197_1373 (size=4945) 2024-12-02T23:13:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742197_1373 (size=4945) 2024-12-02T23:13:57,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742196_1372 (size=4945) 2024-12-02T23:13:57,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742196_1372 (size=4945) 2024-12-02T23:13:57,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742196_1372 (size=4945) 2024-12-02T23:13:57,462 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/.tmp/cf/ef849f790d2c4bb09cfd9c80efd6bf3e 2024-12-02T23:13:57,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:57,495 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/.tmp/cf/ef849f790d2c4bb09cfd9c80efd6bf3e as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e 2024-12-02T23:13:57,511 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): 
Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T23:13:57,513 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d334bb82bb6c41cacabbb40f758ed67f in 177ms, sequenceid=5, compaction requested=false 2024-12-02T23:13:57,513 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-02T23:13:57,534 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:13:57,535 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:13:57,535 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 2024-12-02T23:13:57,535 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for d334bb82bb6c41cacabbb40f758ed67f: Waiting for close lock at 1733181237335Running coprocessor pre-close hooks at 1733181237335Disabling compacts and flushes for region at 1733181237335Disabling writes for close at 1733181237336 (+1 ms)Obtaining lock to block concurrent updates at 1733181237336Preparing flush snapshotting stores in d334bb82bb6c41cacabbb40f758ed67f at 1733181237336Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733181237336Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f. 
at 1733181237337 (+1 ms)Flushing d334bb82bb6c41cacabbb40f758ed67f/cf: creating writer at 1733181237337Flushing d334bb82bb6c41cacabbb40f758ed67f/cf: appending metadata at 1733181237361 (+24 ms)Flushing d334bb82bb6c41cacabbb40f758ed67f/cf: closing flushed file at 1733181237362 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c36a88e: reopening flushed file at 1733181237491 (+129 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for d334bb82bb6c41cacabbb40f758ed67f in 177ms, sequenceid=5, compaction requested=false at 1733181237513 (+22 ms)Writing region close event to WAL at 1733181237530 (+17 ms)Running coprocessor post-close hooks at 1733181237535 (+5 ms)Closed at 1733181237535 2024-12-02T23:13:57,539 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=d334bb82bb6c41cacabbb40f758ed67f, regionState=CLOSED 2024-12-02T23:13:57,543 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:13:57,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:13:57,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-02T23:13:57,554 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure d334bb82bb6c41cacabbb40f758ed67f, server=9e5ab3b623d7,38049,1733180945779 in 370 msec 2024-12-02T23:13:57,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=d334bb82bb6c41cacabbb40f758ed67f, UNASSIGN in 394 msec 2024-12-02T23:13:57,674 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0006_000001 (auth:SIMPLE) from 127.0.0.1:43056 2024-12-02T23:13:57,685 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000001/launch_container.sh] 2024-12-02T23:13:57,685 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000001/container_tokens] 2024-12-02T23:13:57,685 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0006/container_1733180955226_0006_01_000001/sysfs] 
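Each flush above materializes a ~4.8 K HFile under the region's cf directory (the Block report entries show HDFS acknowledging three replicas of each block), and the close then writes a recovered.edits/<seqid> marker. A hedged sketch of inspecting that on-disk layout with the Hadoop FileSystem API; the placeholder path segments are assumptions, not copied from the log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Lists the store files of one column family of one region; the NameNode
    // address, root directory, table and encoded region name are placeholders.
    static void listStoreFiles(Configuration conf) throws Exception {
      FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33369"), conf);
      Path cfDir = new Path("/user/jenkins/test-data/<root>/data/default/<table>/<encoded-region>/cf");
      for (FileStatus stat : fs.listStatus(cfDir)) {
        System.out.println(stat.getPath().getName() + " " + stat.getLen() + " bytes");
      }
    }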
2024-12-02T23:13:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:57,862 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/.tmp/cf/c4cf533835024c56a3d7c15458aa628b 2024-12-02T23:13:57,883 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/.tmp/cf/c4cf533835024c56a3d7c15458aa628b as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b 2024-12-02T23:13:57,888 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T23:13:57,889 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 116bbb1f3ad85fbca43dabb0569f488d in 557ms, sequenceid=5, compaction requested=false 2024-12-02T23:13:57,893 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:13:57,893 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:13:57,894 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. 
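Once both parents are closed, the master deletes their hbase:meta rows, inserts the merged region e51dffa61d3cd9bf7afa2719f876bf5c (STARTKEY '' to ENDKEY ''), and reopens it from reference files on 9e5ab3b623d7,39847, after which MergeTableRegionsProcedure pid=148 finishes. A hedged client-side sketch for verifying that post-merge layout (illustrative, not the test's code):

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // After the merge completes, the table should be back to a single region
    // spanning the full key space, as the OPEN entry for e51dffa6... shows.
    static void verifySingleRegion(Admin admin, TableName tn) throws Exception {
      List<RegionInfo> regions = admin.getRegions(tn);
      if (regions.size() != 1) {
        throw new IllegalStateException("expected one region, found " + regions.size());
      }
      RegionInfo only = regions.get(0);
      if (only.getStartKey().length != 0 || only.getEndKey().length != 0) {
        throw new IllegalStateException("merged region does not span the full key space");
      }
    }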
2024-12-02T23:13:57,894 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 116bbb1f3ad85fbca43dabb0569f488d: Waiting for close lock at 1733181237332Running coprocessor pre-close hooks at 1733181237332Disabling compacts and flushes for region at 1733181237332Disabling writes for close at 1733181237332Obtaining lock to block concurrent updates at 1733181237332Preparing flush snapshotting stores in 116bbb1f3ad85fbca43dabb0569f488d at 1733181237332Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733181237332Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d. at 1733181237333 (+1 ms)Flushing 116bbb1f3ad85fbca43dabb0569f488d/cf: creating writer at 1733181237333Flushing 116bbb1f3ad85fbca43dabb0569f488d/cf: appending metadata at 1733181237376 (+43 ms)Flushing 116bbb1f3ad85fbca43dabb0569f488d/cf: closing flushed file at 1733181237376Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7853e7c2: reopening flushed file at 1733181237882 (+506 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 116bbb1f3ad85fbca43dabb0569f488d in 557ms, sequenceid=5, compaction requested=false at 1733181237889 (+7 ms)Writing region close event to WAL at 1733181237890 (+1 ms)Running coprocessor post-close hooks at 1733181237893 (+3 ms)Closed at 1733181237894 (+1 ms) 2024-12-02T23:13:57,895 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:13:57,896 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=116bbb1f3ad85fbca43dabb0569f488d, regionState=CLOSED 2024-12-02T23:13:57,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:13:57,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-02T23:13:57,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 116bbb1f3ad85fbca43dabb0569f488d, server=9e5ab3b623d7,39847,1733180945622 in 731 msec 2024-12-02T23:13:57,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-02T23:13:57,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=116bbb1f3ad85fbca43dabb0569f488d, UNASSIGN in 748 msec 2024-12-02T23:13:57,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742198_1374 (size=84) 2024-12-02T23:13:57,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742198_1374 (size=84) 2024-12-02T23:13:57,961 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742198_1374 (size=84) 2024-12-02T23:13:57,964 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:57,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742199_1375 (size=20) 2024-12-02T23:13:57,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742199_1375 (size=20) 2024-12-02T23:13:57,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742199_1375 (size=20) 2024-12-02T23:13:58,001 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:58,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742200_1376 (size=21) 2024-12-02T23:13:58,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742200_1376 (size=21) 2024-12-02T23:13:58,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742200_1376 (size=21) 2024-12-02T23:13:58,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742201_1377 (size=84) 2024-12-02T23:13:58,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742201_1377 (size=84) 2024-12-02T23:13:58,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742201_1377 (size=84) 2024-12-02T23:13:58,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:58,425 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:58,436 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-02T23:13:58,438 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235938.116bbb1f3ad85fbca43dabb0569f488d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:58,438 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733181235938.d334bb82bb6c41cacabbb40f758ed67f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:58,439 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T23:13:58,443 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, ASSIGN}] 2024-12-02T23:13:58,444 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, ASSIGN 2024-12-02T23:13:58,445 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, ASSIGN; state=MERGED, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:13:58,596 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T23:13:58,596 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=e51dffa61d3cd9bf7afa2719f876bf5c, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:13:58,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, ASSIGN because future has completed 2024-12-02T23:13:58,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:13:58,755 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:13:58,755 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => e51dffa61d3cd9bf7afa2719f876bf5c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:13:58,756 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 
service=AccessControlService 2024-12-02T23:13:58,756 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:13:58,756 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,756 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:13:58,756 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,756 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,757 INFO [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,758 INFO [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e51dffa61d3cd9bf7afa2719f876bf5c columnFamilyName cf 2024-12-02T23:13:58,759 DEBUG [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:13:58,769 DEBUG [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/c4cf533835024c56a3d7c15458aa628b.116bbb1f3ad85fbca43dabb0569f488d->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b-top 2024-12-02T23:13:58,774 DEBUG [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/ef849f790d2c4bb09cfd9c80efd6bf3e.d334bb82bb6c41cacabbb40f758ed67f->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e-top 2024-12-02T23:13:58,775 INFO [StoreOpener-e51dffa61d3cd9bf7afa2719f876bf5c-1 {}] regionserver.HStore(327): Store=e51dffa61d3cd9bf7afa2719f876bf5c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:13:58,775 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,776 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,776 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,777 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,777 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,779 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened e51dffa61d3cd9bf7afa2719f876bf5c; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64170103, jitterRate=-0.04379095137119293}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:13:58,779 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:58,779 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for e51dffa61d3cd9bf7afa2719f876bf5c: Running coprocessor pre-open hook at 1733181238756Writing region info on filesystem at 1733181238756Initializing all the Stores at 1733181238757 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181238757Cleaning up temporary data from old regions at 1733181238777 (+20 ms)Running coprocessor post-open hooks at 1733181238779 (+2 ms)Region opened successfully at 1733181238779 2024-12-02T23:13:58,780 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c., pid=154, masterSystemTime=1733181238752 2024-12-02T23:13:58,780 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.,because compaction is disabled. 2024-12-02T23:13:58,782 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:13:58,782 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:13:58,782 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=e51dffa61d3cd9bf7afa2719f876bf5c, regionState=OPEN, openSeqNum=9, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:13:58,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:13:58,787 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-02T23:13:58,787 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622 in 186 msec 2024-12-02T23:13:58,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-02T23:13:58,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, ASSIGN in 344 msec 2024-12-02T23:13:58,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[116bbb1f3ad85fbca43dabb0569f488d, d334bb82bb6c41cacabbb40f758ed67f], force=true in 1.6440 sec 2024-12-02T23:13:58,820 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:13:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T23:13:59,312 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T23:13:59,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T23:13:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181239313 (current time:1733181239313). 2024-12-02T23:13:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:13:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-02T23:13:59,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:13:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bace2db, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:59,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:59,315 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:59,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:59,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:59,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75449421, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:59,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:59,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,316 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48672, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:59,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a1d492f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:59,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:59,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:59,319 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39994, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:59,320 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:13:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,320 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:13:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b0c6722, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:13:59,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:13:59,321 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:13:59,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:13:59,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:13:59,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d4abbd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:13:59,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:13:59,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,322 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:13:59,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c664100, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:13:59,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:13:59,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:13:59,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:59,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T23:13:59,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:13:59,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:13:59,327 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56696, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:13:59,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:13:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:13:59,328 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:13:59,328 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:13:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T23:13:59,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:13:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T23:13:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T23:13:59,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:13:59,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T23:13:59,331 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:13:59,333 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:13:59,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742202_1378 (size=216) 2024-12-02T23:13:59,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742202_1378 (size=216) 2024-12-02T23:13:59,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742202_1378 (size=216) 2024-12-02T23:13:59,340 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:13:59,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c}] 2024-12-02T23:13:59,341 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:59,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T23:13:59,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-02T23:13:59,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:13:59,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for e51dffa61d3cd9bf7afa2719f876bf5c: 2024-12-02T23:13:59,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-02T23:13:59,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:13:59,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/c4cf533835024c56a3d7c15458aa628b.116bbb1f3ad85fbca43dabb0569f488d->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b-top, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/ef849f790d2c4bb09cfd9c80efd6bf3e.d334bb82bb6c41cacabbb40f758ed67f->hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e-top] hfiles 2024-12-02T23:13:59,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/c4cf533835024c56a3d7c15458aa628b.116bbb1f3ad85fbca43dabb0569f488d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/ef849f790d2c4bb09cfd9c80efd6bf3e.d334bb82bb6c41cacabbb40f758ed67f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742203_1379 (size=269) 2024-12-02T23:13:59,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742203_1379 (size=269) 2024-12-02T23:13:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742203_1379 (size=269) 2024-12-02T23:13:59,500 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 
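The entries above record the server side of a FLUSH-type snapshot: MasterRpcServices accepts the request, SnapshotProcedure pid=155 steps through its states, and a SnapshotRegionProcedure stores the region-info and hfile references (the procedure completes a few entries below). A minimal, illustrative sketch of the public client call that triggers this flow, assuming a reachable cluster configured via hbase-site.xml and reusing the table and snapshot names from this log (this is not the test's actual code):

// Illustrative sketch only: requesting a FLUSH snapshot through the public Admin API.
// Table and snapshot names are copied from the log above; cluster config is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Sends the same kind of snapshot request logged by MasterRpcServices(1763) above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("default", "testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}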
2024-12-02T23:13:59,500 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-02T23:13:59,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-02T23:13:59,500 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:59,500 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:13:59,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-02T23:13:59,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c in 161 msec 2024-12-02T23:13:59,503 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:13:59,503 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:13:59,504 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:13:59,504 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,505 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742204_1380 (size=670) 2024-12-02T23:13:59,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742204_1380 (size=670) 2024-12-02T23:13:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742204_1380 (size=670) 2024-12-02T23:13:59,514 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:13:59,519 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:13:59,519 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,520 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:13:59,520 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T23:13:59,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 191 msec 2024-12-02T23:13:59,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T23:13:59,652 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T23:13:59,652 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652 2024-12-02T23:13:59,652 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:59,681 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:13:59,681 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,682 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:13:59,687 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:13:59,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742205_1381 (size=216) 2024-12-02T23:13:59,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742205_1381 (size=216) 2024-12-02T23:13:59,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742206_1382 (size=670) 2024-12-02T23:13:59,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742205_1381 (size=216) 2024-12-02T23:13:59,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742206_1382 (size=670) 2024-12-02T23:13:59,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742206_1382 (size=670) 2024-12-02T23:13:59,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:59,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:13:59,703 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-6823573955201659014.jar 2024-12-02T23:14:00,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,881 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-15880796363804057453.jar 2024-12-02T23:14:00,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:00,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:14:00,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:14:00,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:14:00,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-12-02T23:14:00,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:14:00,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:14:00,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:14:00,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:14:00,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:14:00,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:14:00,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:14:00,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:00,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:00,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:00,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:00,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
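The TableMapReduceUtil(972) entries above and below come from dependency-jar resolution for the export MapReduce job: for each class the job needs, the containing jar is located and attached to the job configuration. A hedged sketch of the usual public entry point (the job name here is a placeholder; this is not the test's own setup code):

// Illustrative sketch only: letting TableMapReduceUtil resolve and attach the HBase/Hadoop
// dependency jars for a MapReduce job, which produces "For class X, using jar Y" log lines.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch"); // placeholder job name
    // Finds the jar containing each required class and adds it to the job's tmpjars.
    TableMapReduceUtil.addDependencyJars(job);
  }
}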
2024-12-02T23:14:00,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:00,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:01,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742207_1383 (size=131440) 2024-12-02T23:14:01,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742207_1383 (size=131440) 2024-12-02T23:14:01,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742207_1383 (size=131440) 2024-12-02T23:14:01,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T23:14:01,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T23:14:01,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742208_1384 (size=4188619) 2024-12-02T23:14:01,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T23:14:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T23:14:01,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742209_1385 (size=1323991) 2024-12-02T23:14:01,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742210_1386 (size=903926) 2024-12-02T23:14:01,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742210_1386 (size=903926) 2024-12-02T23:14:01,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742210_1386 (size=903926) 2024-12-02T23:14:01,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T23:14:01,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T23:14:01,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742211_1387 (size=8360083) 2024-12-02T23:14:01,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742212_1388 (size=443172) 2024-12-02T23:14:01,339 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742212_1388 (size=443172) 2024-12-02T23:14:01,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742212_1388 (size=443172) 2024-12-02T23:14:01,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742213_1389 (size=1877034) 2024-12-02T23:14:01,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742213_1389 (size=1877034) 2024-12-02T23:14:01,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742213_1389 (size=1877034) 2024-12-02T23:14:01,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742214_1390 (size=77835) 2024-12-02T23:14:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742214_1390 (size=77835) 2024-12-02T23:14:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742214_1390 (size=77835) 2024-12-02T23:14:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742215_1391 (size=30949) 2024-12-02T23:14:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742215_1391 (size=30949) 2024-12-02T23:14:01,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742215_1391 (size=30949) 2024-12-02T23:14:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742216_1392 (size=1597213) 2024-12-02T23:14:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742216_1392 (size=1597213) 2024-12-02T23:14:02,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742216_1392 (size=1597213) 2024-12-02T23:14:02,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742217_1393 (size=6424743) 2024-12-02T23:14:02,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742217_1393 (size=6424743) 2024-12-02T23:14:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742217_1393 (size=6424743) 2024-12-02T23:14:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742218_1394 (size=4695811) 2024-12-02T23:14:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742218_1394 (size=4695811) 2024-12-02T23:14:02,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742218_1394 (size=4695811) 
2024-12-02T23:14:02,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742219_1395 (size=232957) 2024-12-02T23:14:02,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742219_1395 (size=232957) 2024-12-02T23:14:02,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742219_1395 (size=232957) 2024-12-02T23:14:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742220_1396 (size=127628) 2024-12-02T23:14:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742220_1396 (size=127628) 2024-12-02T23:14:02,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742220_1396 (size=127628) 2024-12-02T23:14:02,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742221_1397 (size=20406) 2024-12-02T23:14:02,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742221_1397 (size=20406) 2024-12-02T23:14:02,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742221_1397 (size=20406) 2024-12-02T23:14:02,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742222_1398 (size=5175431) 2024-12-02T23:14:02,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742222_1398 (size=5175431) 2024-12-02T23:14:02,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742222_1398 (size=5175431) 2024-12-02T23:14:02,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742223_1399 (size=217634) 2024-12-02T23:14:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742223_1399 (size=217634) 2024-12-02T23:14:02,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742223_1399 (size=217634) 2024-12-02T23:14:02,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T23:14:02,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T23:14:02,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742224_1400 (size=1832290) 2024-12-02T23:14:02,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742225_1401 (size=322274) 2024-12-02T23:14:02,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742225_1401 
(size=322274) 2024-12-02T23:14:02,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742225_1401 (size=322274) 2024-12-02T23:14:02,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742226_1402 (size=503880) 2024-12-02T23:14:02,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742226_1402 (size=503880) 2024-12-02T23:14:02,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742226_1402 (size=503880) 2024-12-02T23:14:02,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742227_1403 (size=29229) 2024-12-02T23:14:02,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742227_1403 (size=29229) 2024-12-02T23:14:02,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742227_1403 (size=29229) 2024-12-02T23:14:02,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742228_1404 (size=24096) 2024-12-02T23:14:02,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742228_1404 (size=24096) 2024-12-02T23:14:02,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742228_1404 (size=24096) 2024-12-02T23:14:02,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742229_1405 (size=111872) 2024-12-02T23:14:02,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742229_1405 (size=111872) 2024-12-02T23:14:02,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742229_1405 (size=111872) 2024-12-02T23:14:02,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742230_1406 (size=45609) 2024-12-02T23:14:02,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742230_1406 (size=45609) 2024-12-02T23:14:02,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742230_1406 (size=45609) 2024-12-02T23:14:02,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742231_1407 (size=136454) 2024-12-02T23:14:02,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742231_1407 (size=136454) 2024-12-02T23:14:02,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742231_1407 (size=136454) 2024-12-02T23:14:02,968 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-12-02T23:14:02,971 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-02T23:14:02,973 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-02T23:14:02,974 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-02T23:14:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742232_1408 (size=481) 2024-12-02T23:14:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742232_1408 (size=481) 2024-12-02T23:14:03,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742232_1408 (size=481) 2024-12-02T23:14:03,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742233_1409 (size=21) 2024-12-02T23:14:03,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742233_1409 (size=21) 2024-12-02T23:14:03,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742233_1409 (size=21) 2024-12-02T23:14:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742234_1410 (size=304055) 2024-12-02T23:14:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742234_1410 (size=304055) 2024-12-02T23:14:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742234_1410 (size=304055) 2024-12-02T23:14:03,340 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:14:03,340 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:14:03,599 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
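The FsDatasetAsyncDiskServiceFixer record just above reports NoSuchFieldException for the private threadGroup field, which the test utility looks up reflectively and which newer Hadoop releases have renamed or removed (see HBASE-27595). The sketch below is a minimal, self-contained illustration of how such a reflective lookup can degrade gracefully when a field disappears; it is not the HBaseTestingUtil code, and the ThreadPoolHolder class and field names are invented for the example.

    import java.lang.reflect.Field;

    public final class ReflectiveFieldProbe {
      /** Toy stand-in for a third-party class whose internals may change between versions. */
      static final class ThreadPoolHolder {
        private final ThreadGroup threadGroup = new ThreadGroup("workers");
      }

      /** Returns the named private field's value, or null if the field no longer exists. */
      static Object readPrivateField(Object target, String fieldName) {
        try {
          Field f = target.getClass().getDeclaredField(fieldName);
          f.setAccessible(true);
          return f.get(target);
        } catch (NoSuchFieldException e) {
          // Same failure mode as the log record above: the field was renamed or removed upstream.
          System.out.println("NoSuchFieldException: " + fieldName
              + "; falling back because the target class layout changed");
          return null;
        } catch (IllegalAccessException e) {
          throw new IllegalStateException(e);
        }
      }

      public static void main(String[] args) {
        ThreadPoolHolder holder = new ThreadPoolHolder();
        System.out.println("threadGroup -> " + readPrivateField(holder, "threadGroup"));
        System.out.println("gone        -> " + readPrivateField(holder, "gone"));
      }
    }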
2024-12-02T23:14:03,658 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:34332 2024-12-02T23:14:04,262 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:05,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:05,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-02T23:14:05,745 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T23:14:05,802 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T23:14:05,846 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T23:14:05,951 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T23:14:05,953 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=14, reuseRatio=58.33% 2024-12-02T23:14:08,043 INFO [regionserver/9e5ab3b623d7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T23:14:08,050 INFO [regionserver/9e5ab3b623d7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T23:14:08,108 INFO [regionserver/9e5ab3b623d7:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T23:14:09,041 INFO [regionserver/9e5ab3b623d7:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 132291 ms 2024-12-02T23:14:09,991 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 65f1f4f527867ff1a6f9bc6f8478cb64 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:14:09,992 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 
4adc1b05c211d1eb3660387c9531d6b5 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:14:09,992 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-02T23:14:09,992 INFO [master/9e5ab3b623d7:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-02T23:14:09,992 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-12-02T23:14:09,994 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:14:09,998 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:10,002 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:14:10,018 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:14:10,019 INFO [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:14:10,019 INFO [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:14:10,019 INFO [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:14:10,019 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-02T23:14:10,024 INFO [master/9e5ab3b623d7:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.01389719956184001 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8267634493938528, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8603565774135244, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-02T23:14:10,024 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-02T23:14:10,030 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T23:14:10,030 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion-1 because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T23:14:10,037 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T23:14:10,037 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] janitor.CatalogJanitor(258): Cleaning merged region {ENCODED => e51dffa61d3cd9bf7afa2719f876bf5c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.', STARTKEY => '', ENDKEY => ''} 2024-12-02T23:14:10,040 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:10,042 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] janitor.CatalogJanitor(283): Deferring cleanup up of 2 parents of merged region e51dffa61d3cd9bf7afa2719f876bf5c, because references still exist in merged region or we encountered an exception in checking 2024-12-02T23:14:10,591 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:47326 2024-12-02T23:14:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742235_1411 (size=349753) 2024-12-02T23:14:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742235_1411 (size=349753) 2024-12-02T23:14:11,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742235_1411 (size=349753) 2024-12-02T23:14:12,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:44204 2024-12-02T23:14:12,842 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:46762 2024-12-02T23:14:14,299 WARN [HBase-Metrics2-1 {}] 
impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:16,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742236_1412 (size=4945) 2024-12-02T23:14:16,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742236_1412 (size=4945) 2024-12-02T23:14:16,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742236_1412 (size=4945) 2024-12-02T23:14:16,921 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000002/launch_container.sh] 2024-12-02T23:14:16,921 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000002/container_tokens] 2024-12-02T23:14:16,921 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000002/sysfs] 2024-12-02T23:14:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742238_1414 (size=4945) 2024-12-02T23:14:17,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742238_1414 (size=4945) 2024-12-02T23:14:17,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742238_1414 (size=4945) 2024-12-02T23:14:17,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742237_1413 (size=22246) 2024-12-02T23:14:17,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742237_1413 (size=22246) 2024-12-02T23:14:17,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742237_1413 (size=22246) 2024-12-02T23:14:17,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742239_1415 (size=482) 2024-12-02T23:14:17,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742239_1415 (size=482) 2024-12-02T23:14:17,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742239_1415 (size=482) 2024-12-02T23:14:17,722 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742240_1416 (size=22246) 2024-12-02T23:14:17,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742240_1416 (size=22246) 2024-12-02T23:14:17,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742240_1416 (size=22246) 2024-12-02T23:14:17,745 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000003/launch_container.sh] 2024-12-02T23:14:17,745 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000003/container_tokens] 2024-12-02T23:14:17,745 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000003/sysfs] 2024-12-02T23:14:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742241_1417 (size=349753) 2024-12-02T23:14:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742241_1417 (size=349753) 2024-12-02T23:14:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742241_1417 (size=349753) 2024-12-02T23:14:17,763 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:44214 2024-12-02T23:14:19,508 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:14:19,509 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
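The "Finalize the Snapshot Export" and "Verify the exported snapshot's expiration status and integrity" records above wrap up the MapReduce copy that the earlier "Loading Snapshot ... hfile list" and "export split=..." records started; the completion record follows below. As a rough sketch of how such an export is normally driven from client code, the snippet runs the standard ExportSnapshot tool through ToolRunner. The snapshot name is the one in the log, but the destination URI, mapper count, and exact option set are assumptions; consult the tool's help output for your HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public final class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the snapshot metadata plus referenced hfiles to another filesystem.
        // This mirrors the CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to", "hdfs://backup-cluster:8020/hbase-backups", // assumed destination
            "-mappers", "2"                                          // assumed; the log shows two splits
        });
        System.exit(rc);
      }
    }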
2024-12-02T23:14:19,515 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,515 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:14:19,515 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:14:19,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T23:14:19,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T23:14:19,515 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T23:14:19,516 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181239652/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T23:14:19,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T23:14:19,523 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181259523"}]},"ts":"1733181259523"} 2024-12-02T23:14:19,525 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-02T23:14:19,525 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-02T23:14:19,525 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-02T23:14:19,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, UNASSIGN}] 2024-12-02T23:14:19,527 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, UNASSIGN 2024-12-02T23:14:19,528 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=e51dffa61d3cd9bf7afa2719f876bf5c, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:19,529 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, UNASSIGN because future has completed 2024-12-02T23:14:19,530 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:19,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:14:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T23:14:19,682 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:14:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing e51dffa61d3cd9bf7afa2719f876bf5c, disabling compactions & flushes 2024-12-02T23:14:19,682 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:14:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 
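The pid=157 through pid=160 records above trace a DisableTableProcedure fanning out into CloseTableRegionsProcedure, TransitRegionStateProcedure (UNASSIGN) and CloseRegionProcedure children, followed later by a DeleteTableProcedure. On the client side that whole tree is triggered by little more than a disable-then-delete call pair, roughly as in the sketch below (standard Admin API; the ZooKeeper host and port values are assumptions taken from the quorum shown in the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public final class DropExportedTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");              // assumed
        conf.set("hbase.zookeeper.property.clientPort", "57019");     // assumed, from the log's quorum
        TableName table =
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            // disableTable blocks until the master's DisableTableProcedure
            // (the pid=157 tree in the log) reports completion.
            if (!admin.isTableDisabled(table)) {
              admin.disableTable(table);
            }
            admin.deleteTable(table);
          }
        }
      }
    }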
2024-12-02T23:14:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. after waiting 0 ms 2024-12-02T23:14:19,682 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:14:19,686 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-02T23:14:19,687 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:19,687 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c. 2024-12-02T23:14:19,687 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for e51dffa61d3cd9bf7afa2719f876bf5c: Waiting for close lock at 1733181259682Running coprocessor pre-close hooks at 1733181259682Disabling compacts and flushes for region at 1733181259682Disabling writes for close at 1733181259682Writing region close event to WAL at 1733181259683 (+1 ms)Running coprocessor post-close hooks at 1733181259687 (+4 ms)Closed at 1733181259687 2024-12-02T23:14:19,688 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:14:19,689 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=e51dffa61d3cd9bf7afa2719f876bf5c, regionState=CLOSED 2024-12-02T23:14:19,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:14:19,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-02T23:14:19,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure e51dffa61d3cd9bf7afa2719f876bf5c, server=9e5ab3b623d7,39847,1733180945622 in 161 msec 2024-12-02T23:14:19,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-02T23:14:19,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e51dffa61d3cd9bf7afa2719f876bf5c, UNASSIGN in 166 msec 2024-12-02T23:14:19,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=158, resume processing ppid=157 2024-12-02T23:14:19,697 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 170 msec 2024-12-02T23:14:19,698 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181259698"}]},"ts":"1733181259698"} 2024-12-02T23:14:19,699 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-02T23:14:19,699 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-02T23:14:19,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 179 msec 2024-12-02T23:14:19,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T23:14:19,842 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T23:14:19,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,844 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,845 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,847 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,847 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:14:19,847 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:14:19,848 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:14:19,849 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/recovered.edits] 2024-12-02T23:14:19,849 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/recovered.edits] 2024-12-02T23:14:19,849 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/recovered.edits] 2024-12-02T23:14:19,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,851 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T23:14:19,851 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T23:14:19,851 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T23:14:19,851 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:19,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:19,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:19,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:19,853 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:19,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-02T23:14:19,854 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:19,855 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/cf/c4cf533835024c56a3d7c15458aa628b 2024-12-02T23:14:19,855 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/cf/ef849f790d2c4bb09cfd9c80efd6bf3e 2024-12-02T23:14:19,855 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/c4cf533835024c56a3d7c15458aa628b.116bbb1f3ad85fbca43dabb0569f488d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/c4cf533835024c56a3d7c15458aa628b.116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:14:19,857 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/ef849f790d2c4bb09cfd9c80efd6bf3e.d334bb82bb6c41cacabbb40f758ed67f to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/cf/ef849f790d2c4bb09cfd9c80efd6bf3e.d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:14:19,858 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/recovered.edits/8.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d/recovered.edits/8.seqid 2024-12-02T23:14:19,858 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/recovered.edits/8.seqid to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f/recovered.edits/8.seqid 2024-12-02T23:14:19,858 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/116bbb1f3ad85fbca43dabb0569f488d 2024-12-02T23:14:19,858 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/d334bb82bb6c41cacabbb40f758ed67f 2024-12-02T23:14:19,859 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/recovered.edits/12.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c/recovered.edits/12.seqid 2024-12-02T23:14:19,860 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e51dffa61d3cd9bf7afa2719f876bf5c 2024-12-02T23:14:19,860 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-02T23:14:19,861 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,863 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-02T23:14:19,865 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-02T23:14:19,866 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,866 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
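The HFileArchiver records above show each region directory's cf/ store files and recovered.edits being moved under archive/data/default/<table>/... before the region directory itself is deleted. The sketch below is an illustrative (not HBase-internal) way to confirm what ended up under that archive path using the plain Hadoop FileSystem API; the NameNode URI and archive path match the log, everything else is an assumption.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public final class ListArchivedStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33369"), conf);
        // Archive root for the deleted table, as reported by the HFileArchiver records above.
        Path archived = new Path("/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/"
            + "archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1");
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true /* recursive */);
        while (it.hasNext()) {
          LocatedFileStatus f = it.next();
          System.out.printf("%10d  %s%n", f.getLen(), f.getPath());
        }
      }
    }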
2024-12-02T23:14:19,866 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181259866"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:19,868 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-02T23:14:19,868 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e51dffa61d3cd9bf7afa2719f876bf5c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T23:14:19,868 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-02T23:14:19,868 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181259868"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:19,869 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-02T23:14:19,870 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 27 msec 2024-12-02T23:14:19,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-02T23:14:19,962 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:19,962 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T23:14:19,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:19,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:19,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T23:14:19,966 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181259965"}]},"ts":"1733181259965"} 2024-12-02T23:14:19,967 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-02T23:14:19,967 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 
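The MetaTableAccessor records above and below serialize table-state changes as a single-column mutation on hbase:meta: the row key is the table name and the value lands in the table:state column (the dumped JSON shows family "table", qualifier "state", vlen=2). The snippet below only illustrates that shape; the master writes this row itself, client code never does, and the value bytes here are placeholders, not the real encoding.

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class TableStatePutShape {
      public static void main(String[] args) {
        // Mirrors the mutation dumped above: row = table name, family = "table", qualifier = "state".
        byte[] row = Bytes.toBytes("testtb-testExportFileSystemStateWithMergeRegion");
        byte[] family = Bytes.toBytes("table");
        byte[] qualifier = Bytes.toBytes("state");
        byte[] fakeStateValue = new byte[] {0x08, 0x01}; // placeholder bytes, NOT the real serialized state
        Put put = new Put(row).addColumn(family, qualifier, fakeStateValue);
        System.out.println(put); // prints the row/column layout of the mutation
      }
    }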
2024-12-02T23:14:19,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-02T23:14:19,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, UNASSIGN}] 2024-12-02T23:14:19,970 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, UNASSIGN 2024-12-02T23:14:19,970 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, UNASSIGN 2024-12-02T23:14:19,970 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=4adc1b05c211d1eb3660387c9531d6b5, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:19,970 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=65f1f4f527867ff1a6f9bc6f8478cb64, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:19,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, UNASSIGN because future has completed 2024-12-02T23:14:19,972 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:19,972 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:19,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, UNASSIGN because future has completed 2024-12-02T23:14:19,972 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:19,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:14:20,072 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T23:14:20,077 INFO [regionserver/9e5ab3b623d7:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. because a8d1ca9c63bdfff14551a9b4d2eb111b/l has an old edit so flush to free WALs after random delay 142425 ms 2024-12-02T23:14:20,124 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:14:20,124 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 4adc1b05c211d1eb3660387c9531d6b5, disabling compactions & flushes 2024-12-02T23:14:20,125 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. after waiting 0 ms 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:14:20,125 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 65f1f4f527867ff1a6f9bc6f8478cb64, disabling compactions & flushes 2024-12-02T23:14:20,125 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. after waiting 0 ms 2024-12-02T23:14:20,125 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 2024-12-02T23:14:20,131 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:20,131 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:20,131 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5. 2024-12-02T23:14:20,131 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 4adc1b05c211d1eb3660387c9531d6b5: Waiting for close lock at 1733181260124Running coprocessor pre-close hooks at 1733181260124Disabling compacts and flushes for region at 1733181260124Disabling writes for close at 1733181260125 (+1 ms)Writing region close event to WAL at 1733181260126 (+1 ms)Running coprocessor post-close hooks at 1733181260131 (+5 ms)Closed at 1733181260131 2024-12-02T23:14:20,133 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:14:20,133 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=4adc1b05c211d1eb3660387c9531d6b5, regionState=CLOSED 2024-12-02T23:14:20,134 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:20,134 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:20,134 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64. 
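The "Region close journal" records above and below attach a (+N ms) delta to each close step, measured against the epoch-millisecond timestamps also printed (1733181260124 through 1733181260131 for region 4adc1b05c211d1eb3660387c9531d6b5). The snippet below simply reproduces that bookkeeping from the journal's own numbers; it is arithmetic on the logged values, not HRegion code.

    public final class CloseJournalDeltas {
      public static void main(String[] args) {
        // Step timestamps copied from the pid=166 close journal above (epoch millis).
        String[] steps = {
            "Waiting for close lock", "Running coprocessor pre-close hooks",
            "Disabling compacts and flushes", "Disabling writes for close",
            "Writing region close event to WAL", "Running coprocessor post-close hooks", "Closed"
        };
        long[] ts = {1733181260124L, 1733181260124L, 1733181260124L, 1733181260125L,
            1733181260126L, 1733181260131L, 1733181260131L};
        for (int i = 0; i < steps.length; i++) {
          long delta = i == 0 ? 0 : ts[i] - ts[i - 1];
          System.out.printf("%-40s at %d (+%d ms)%n", steps[i], ts[i], delta);
        }
        System.out.println("total close time: " + (ts[ts.length - 1] - ts[0]) + " ms"); // 7 ms
      }
    }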
2024-12-02T23:14:20,134 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 65f1f4f527867ff1a6f9bc6f8478cb64: Waiting for close lock at 1733181260125Running coprocessor pre-close hooks at 1733181260125Disabling compacts and flushes for region at 1733181260125Disabling writes for close at 1733181260125Writing region close event to WAL at 1733181260126 (+1 ms)Running coprocessor post-close hooks at 1733181260134 (+8 ms)Closed at 1733181260134 2024-12-02T23:14:20,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:20,136 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:14:20,136 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=65f1f4f527867ff1a6f9bc6f8478cb64, regionState=CLOSED 2024-12-02T23:14:20,137 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:14:20,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-02T23:14:20,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure 4adc1b05c211d1eb3660387c9531d6b5, server=9e5ab3b623d7,41311,1733180945828 in 164 msec 2024-12-02T23:14:20,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=4adc1b05c211d1eb3660387c9531d6b5, UNASSIGN in 169 msec 2024-12-02T23:14:20,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-02T23:14:20,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 65f1f4f527867ff1a6f9bc6f8478cb64, server=9e5ab3b623d7,38049,1733180945779 in 166 msec 2024-12-02T23:14:20,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=164, resume processing ppid=163 2024-12-02T23:14:20,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=65f1f4f527867ff1a6f9bc6f8478cb64, UNASSIGN in 171 msec 2024-12-02T23:14:20,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-02T23:14:20,144 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 175 msec 2024-12-02T23:14:20,146 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181260146"}]},"ts":"1733181260146"} 2024-12-02T23:14:20,147 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-02T23:14:20,147 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-02T23:14:20,149 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 185 msec 2024-12-02T23:14:20,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T23:14:20,282 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T23:14:20,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,286 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,288 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,289 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:14:20,289 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:14:20,291 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf, FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/recovered.edits] 2024-12-02T23:14:20,291 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/recovered.edits] 2024-12-02T23:14:20,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,293 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T23:14:20,293 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T23:14:20,293 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T23:14:20,293 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,294 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-02T23:14:20,297 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/cf/1c6b88aea3cf4d9aa85367b253acbe1a 2024-12-02T23:14:20,297 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/cf/398bffd80ce648fc8117390b4a27780e 2024-12-02T23:14:20,300 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64/recovered.edits/9.seqid 2024-12-02T23:14:20,300 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:14:20,301 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5/recovered.edits/9.seqid 2024-12-02T23:14:20,301 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithMergeRegion/4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:14:20,301 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-02T23:14:20,302 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-02T23:14:20,302 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-02T23:14:20,306 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b202412025155527e379041c6ac3427bd1ccd6564_4adc1b05c211d1eb3660387c9531d6b5 2024-12-02T23:14:20,308 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241202e71a5bcc7dd045659ed756e920e5b270_65f1f4f527867ff1a6f9bc6f8478cb64 2024-12-02T23:14:20,308 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-02T23:14:20,310 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,313 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-02T23:14:20,315 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-02T23:14:20,316 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,316 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-02T23:14:20,316 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181260316"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:20,316 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181260316"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:20,318 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:14:20,318 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 65f1f4f527867ff1a6f9bc6f8478cb64, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733181233641.65f1f4f527867ff1a6f9bc6f8478cb64.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4adc1b05c211d1eb3660387c9531d6b5, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733181233641.4adc1b05c211d1eb3660387c9531d6b5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:14:20,318 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
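The entries above show the master completing DisableTableProcedure pid=162 and then working through DeleteTableProcedure pid=168 for testtb-testExportFileSystemStateWithMergeRegion in response to the Client=jenkins RPCs: the regions are closed and unassigned, their directories are handed to HFileArchiver, and the meta rows and table descriptor are removed. A minimal client-side sketch of the calls that drive this sequence, assuming the standard HBase 2.x Admin API (class name and connection setup are illustrative, not taken from the test source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers the DisableTableProcedure seen above: regions are unassigned and closed.
      admin.disableTable(tn);
      // Triggers the DeleteTableProcedure: region dirs are archived by HFileArchiver,
      // rows are removed from hbase:meta, and the table descriptor is dropped.
      admin.deleteTable(tn);
    }
  }
}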
2024-12-02T23:14:20,318 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181260318"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:20,320 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-02T23:14:20,320 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 37 msec 2024-12-02T23:14:20,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-02T23:14:20,402 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,402 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T23:14:20,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T23:14:20,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T23:14:20,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:20,415 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-02T23:14:20,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:20,437 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=810 (was 799) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:42350 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:35647 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37313 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:32824 [Waiting for operation #3] 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 11760) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client 
(243496960) connection to localhost/127.0.0.1:37313 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1694298161_1 at /127.0.0.1:32798 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:34808 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6327 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=804 (was 781) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1039 (was 1154), ProcessCount=16 (was 19), AvailableMemoryMB=1387 (was 1829) 2024-12-02T23:14:20,437 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-02T23:14:20,455 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=810, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=1039, ProcessCount=16, AvailableMemoryMB=1386 2024-12-02T23:14:20,455 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-02T23:14:20,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:14:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:20,458 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:14:20,458 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-12-02T23:14:20,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 
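At this point the three snapshots named in the delete requests above have been removed, and the next test begins by creating the MOB-enabled table testtb-testExportExpiredSnapshot with a single 'cf' family (IS_MOB 'true', MOB_THRESHOLD '0', VERSIONS '1', BLOOMFILTER 'ROW') and one split point at row key '1'. A hedged sketch of how that teardown and table definition could be expressed with the public 2.x client API (the exact helpers used by the test are not visible in this log):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

class CreateMobTableSketch {
  static void recreate(Admin admin) throws Exception {
    // Drop the snapshots left over from the previous test, as logged above.
    admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
    admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
    admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");

    // Column family 'cf' matching the descriptor in the create-table entry:
    // MOB enabled with threshold 0, one version, ROW bloom filter.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
        .setColumnFamily(cf)
        .build();
    // One split key yields the two regions created below: ('', '1') and ('1', '').
    admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
  }
}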
2024-12-02T23:14:20,459 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:14:20,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742242_1418 (size=443) 2024-12-02T23:14:20,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742242_1418 (size=443) 2024-12-02T23:14:20,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742242_1418 (size=443) 2024-12-02T23:14:20,468 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ad49f520e0d675103d5333bf70f6b644, NAME => 'testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:20,468 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9f38675d8291c59d08c64a8f6a377b82, NAME => 'testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:20,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742243_1419 (size=68) 2024-12-02T23:14:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742243_1419 (size=68) 2024-12-02T23:14:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742244_1420 (size=68) 2024-12-02T23:14:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742243_1419 (size=68) 2024-12-02T23:14:20,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742244_1420 (size=68) 2024-12-02T23:14:20,475 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742244_1420 (size=68) 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing ad49f520e0d675103d5333bf70f6b644, disabling compactions & flushes 2024-12-02T23:14:20,475 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. after waiting 0 ms 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,475 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for ad49f520e0d675103d5333bf70f6b644: Waiting for close lock at 1733181260475Disabling compacts and flushes for region at 1733181260475Disabling writes for close at 1733181260475Writing region close event to WAL at 1733181260475Closed at 1733181260475 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 9f38675d8291c59d08c64a8f6a377b82, disabling compactions & flushes 2024-12-02T23:14:20,475 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 
after waiting 0 ms 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:20,475 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:20,475 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9f38675d8291c59d08c64a8f6a377b82: Waiting for close lock at 1733181260475Disabling compacts and flushes for region at 1733181260475Disabling writes for close at 1733181260475Writing region close event to WAL at 1733181260475Closed at 1733181260475 2024-12-02T23:14:20,476 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:14:20,476 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733181260476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181260476"}]},"ts":"1733181260476"} 2024-12-02T23:14:20,476 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733181260476"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181260476"}]},"ts":"1733181260476"} 2024-12-02T23:14:20,478 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
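"Added 2 regions to meta" confirms that regioninfo and state cells for both new regions were written to hbase:meta. Once the table is online, the same two regions can be listed from a client; a short sketch using Admin.getRegions (standard 2.x API assumed, names illustrative):

import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

class ListRegionsSketch {
  static void printRegions(Admin admin) throws Exception {
    // Should print two regions with key ranges ('', '1') and ('1', ''), matching the meta Puts above.
    List<RegionInfo> regions = admin.getRegions(TableName.valueOf("testtb-testExportExpiredSnapshot"));
    for (RegionInfo ri : regions) {
      System.out.println(ri.getEncodedName() + " ["
          + Bytes.toStringBinary(ri.getStartKey()) + ", "
          + Bytes.toStringBinary(ri.getEndKey()) + ")");
    }
  }
}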
2024-12-02T23:14:20,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:14:20,479 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181260479"}]},"ts":"1733181260479"} 2024-12-02T23:14:20,480 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T23:14:20,481 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:14:20,482 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:14:20,482 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:14:20,482 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:14:20,482 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:14:20,482 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, ASSIGN}] 2024-12-02T23:14:20,483 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, ASSIGN 2024-12-02T23:14:20,483 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, ASSIGN 2024-12-02T23:14:20,484 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:14:20,484 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:14:20,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T23:14:20,634 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T23:14:20,634 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=9f38675d8291c59d08c64a8f6a377b82, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:20,634 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=ad49f520e0d675103d5333bf70f6b644, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:20,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, ASSIGN because future has completed 2024-12-02T23:14:20,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:14:20,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, ASSIGN because future has completed 2024-12-02T23:14:20,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:20,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T23:14:20,791 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,791 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 
2024-12-02T23:14:20,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 9f38675d8291c59d08c64a8f6a377b82, NAME => 'testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:14:20,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => ad49f520e0d675103d5333bf70f6b644, NAME => 'testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. service=AccessControlService 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. service=AccessControlService 2024-12-02T23:14:20,792 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,792 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,793 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
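The two OpenRegionProcedures (pid=172 and pid=173) are now opening the regions on the servers chosen during assignment (ports 39847 and 41311 per the entries above). After assignment completes, a client can confirm the placement; a minimal sketch with RegionLocator (standard client API assumed, not code from the test itself):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

class LocateRegionsSketch {
  static void printLocations(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Each location pairs a RegionInfo with the region server hosting it after assignment.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}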
2024-12-02T23:14:20,793 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,793 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:20,793 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,793 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,794 INFO [StoreOpener-9f38675d8291c59d08c64a8f6a377b82-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,794 INFO [StoreOpener-ad49f520e0d675103d5333bf70f6b644-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,796 INFO [StoreOpener-ad49f520e0d675103d5333bf70f6b644-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ad49f520e0d675103d5333bf70f6b644 columnFamilyName cf 2024-12-02T23:14:20,796 INFO [StoreOpener-9f38675d8291c59d08c64a8f6a377b82-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9f38675d8291c59d08c64a8f6a377b82 columnFamilyName cf 2024-12-02T23:14:20,798 DEBUG [StoreOpener-ad49f520e0d675103d5333bf70f6b644-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:20,798 DEBUG [StoreOpener-9f38675d8291c59d08c64a8f6a377b82-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:20,799 INFO [StoreOpener-ad49f520e0d675103d5333bf70f6b644-1 {}] regionserver.HStore(327): Store=ad49f520e0d675103d5333bf70f6b644/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:20,799 INFO [StoreOpener-9f38675d8291c59d08c64a8f6a377b82-1 {}] regionserver.HStore(327): Store=9f38675d8291c59d08c64a8f6a377b82/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:20,800 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,800 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,800 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,800 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,803 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,803 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,805 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:20,805 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:20,806 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 9f38675d8291c59d08c64a8f6a377b82; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74309221, jitterRate=0.10729368031024933}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:20,806 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:20,806 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened ad49f520e0d675103d5333bf70f6b644; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71363375, jitterRate=0.06339715421199799}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:20,806 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:20,806 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 9f38675d8291c59d08c64a8f6a377b82: Running coprocessor pre-open hook at 1733181260792Writing region info on filesystem at 1733181260793 (+1 ms)Initializing all the Stores at 1733181260794 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181260794Cleaning up temporary data from old regions at 1733181260801 (+7 ms)Running coprocessor post-open hooks at 1733181260806 (+5 ms)Region opened successfully at 1733181260806 2024-12-02T23:14:20,806 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for ad49f520e0d675103d5333bf70f6b644: Running coprocessor 
pre-open hook at 1733181260793Writing region info on filesystem at 1733181260793Initializing all the Stores at 1733181260794 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181260794Cleaning up temporary data from old regions at 1733181260801 (+7 ms)Running coprocessor post-open hooks at 1733181260806 (+5 ms)Region opened successfully at 1733181260806 2024-12-02T23:14:20,807 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644., pid=173, masterSystemTime=1733181260789 2024-12-02T23:14:20,807 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82., pid=172, masterSystemTime=1733181260788 2024-12-02T23:14:20,809 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,809 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:20,809 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=ad49f520e0d675103d5333bf70f6b644, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:20,809 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:20,810 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 
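[annotation] The CompactionConfiguration lines printed while the cf stores were opened (minCompactSize 128 MB, minFilesToCompact 3 / maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, major period 604800000 with jitter 0.5) look like the stock defaults. As far as I can tell they map onto the standard hbase-site.xml keys shown below; this sketch only illustrates the key-to-value mapping and is not configuration the test sets explicitly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "size [minCompactSize:128 MB, ...)" -- files below this size always qualify for minor compaction
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // "files [minFilesToCompact:3, maxFilesToCompact:10)"
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // "ratio 1.200000; off-peak ratio 5.000000"
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // "throttle point 2684354560" (2.5 GB): larger compactions go to the large-compaction pool
        conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
        // "major period 604800000, major jitter 0.500000" (weekly, +/- 50%)
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println("effective ratio = " + conf.getFloat("hbase.hstore.compaction.ratio", -1f));
      }
    }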
2024-12-02T23:14:20,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:20,812 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=9f38675d8291c59d08c64a8f6a377b82, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:20,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:14:20,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-02T23:14:20,816 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828 in 176 msec 2024-12-02T23:14:20,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-02T23:14:20,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, ASSIGN in 334 msec 2024-12-02T23:14:20,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622 in 179 msec 2024-12-02T23:14:20,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-02T23:14:20,820 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, ASSIGN in 336 msec 2024-12-02T23:14:20,821 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:14:20,821 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181260821"}]},"ts":"1733181260821"} 2024-12-02T23:14:20,823 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T23:14:20,826 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:14:20,826 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T23:14:20,830 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-12-02T23:14:20,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:20,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:20,841 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:20,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 384 msec 2024-12-02T23:14:20,846 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:20,846 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:21,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T23:14:21,082 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T23:14:21,083 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:21,086 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-02T23:14:21,086 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 
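[annotation] Once the table reaches ENABLED, PermissionStorage records "testtb-testExportExpiredSnapshot jenkins: RWXCA" in hbase:acl, and the NodeChildrenChanged events above show each server's ZKPermissionWatcher refreshing its cache from the /hbase/acl znode. The five letters correspond to Permission.Action READ, WRITE, EXEC, CREATE, ADMIN. In this run the creator gets the grant implicitly; the sketch below shows how an equivalent grant would be issued explicitly from a client, purely for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePerms {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // RWXCA == READ, WRITE, EXEC, CREATE, ADMIN, scoped to the whole table
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              "jenkins",
              null,   // family: null = all column families
              null,   // qualifier: null = all qualifiers
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }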
2024-12-02T23:14:21,086 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:21,088 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:21,093 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:21,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:21,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T23:14:21,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181261100 (current time:1733181261100). 2024-12-02T23:14:21,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:21,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T23:14:21,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:21,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54175ebb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:21,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:21,102 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1531f479, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:21,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,103 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39312, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:21,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5123d052, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:21,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:21,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:21,106 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:21,107 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,107 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:21,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a00cfce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:21,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:21,108 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:21,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:21,108 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d6fe426, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:21,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,109 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39338, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:21,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@194a3e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:21,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:21,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:21,111 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:21,112 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:21,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:21,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:21,114 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:21,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
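[annotation] The runs of ClusterIdFetcher / ConnectionRegistry / "Stopping rpc client" entries, and the "Connection has been closed ... Call stack" dumps, come from short-lived internal Connections the master opens and closes while validating the snapshot request; the printed stack is informational, not an error. For comparison, application code scopes the same object with try-with-resources. A minimal, generic sketch (the row key used below is a placeholder):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionScope {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // A Connection is heavyweight (cluster id lookup, meta location cache, rpc client);
        // create it once and close it deterministically.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Result r = table.get(new Get(Bytes.toBytes("1")));
          System.out.println("row exists: " + !r.isEmpty());
        }
      }
    }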
2024-12-02T23:14:21,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:21,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:21,116 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:21,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T23:14:21,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
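[annotation] With the security check and the ACL read done ("No existing snapshot, attempting snapshot..."), the master is about to store SnapshotProcedure pid=174 for { ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }. From the client's side the whole exchange is a single Admin call; a sketch assuming the synchronous variant (the recurring "Checking to see if procedure is done pid=174" lines below are that call polling for completion):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: regions are flushed (or, as here, found empty) and only
          // region manifests/references are written -- no data files are copied.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH);
        }
      }
    }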
2024-12-02T23:14:21,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T23:14:21,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T23:14:21,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T23:14:21,119 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:21,120 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:21,122 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:21,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742245_1421 (size=170) 2024-12-02T23:14:21,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742245_1421 (size=170) 2024-12-02T23:14:21,131 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:21,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82}] 2024-12-02T23:14:21,132 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:21,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:21,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742245_1421 (size=170) 2024-12-02T23:14:21,222 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T23:14:21,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-02T23:14:21,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 9f38675d8291c59d08c64a8f6a377b82: 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for ad49f520e0d675103d5333bf70f6b644: 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:21,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:21,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742247_1423 (size=71) 2024-12-02T23:14:21,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742247_1423 (size=71) 2024-12-02T23:14:21,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:21,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-02T23:14:21,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742246_1422 (size=71) 2024-12-02T23:14:21,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-02T23:14:21,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742246_1422 (size=71) 2024-12-02T23:14:21,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:21,295 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:21,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742247_1423 (size=71) 2024-12-02T23:14:21,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742246_1422 (size=71) 2024-12-02T23:14:21,300 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 in 166 msec 2024-12-02T23:14:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T23:14:21,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:21,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-02T23:14:21,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-02T23:14:21,697 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:21,697 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:21,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-02T23:14:21,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 in 567 msec 2024-12-02T23:14:21,700 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:21,701 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:21,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
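[annotation] Both SnapshotRegionProcedures (pid=175 and pid=176) have reported back, and pid=174 walks through the split-region and MOB-region states; next it consolidates the manifest and moves the snapshot out of .tmp (below). Once pid=174 finishes, the snapshot is visible to any client. A small verification sketch follows; the Pattern-based listing is illustrative rather than something this test does at this point.

    import java.util.regex.Pattern;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // List every snapshot whose name mentions testExportExpiredSnapshot.
          for (SnapshotDescription sd :
               admin.listSnapshots(Pattern.compile(".*testExportExpiredSnapshot.*"))) {
            System.out.println(sd.getName() + " -> table " + sd.getTableName());
          }
        }
      }
    }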
2024-12-02T23:14:21,702 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:21,703 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:21,703 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:14:21,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742248_1424 (size=63) 2024-12-02T23:14:21,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742248_1424 (size=63) 2024-12-02T23:14:21,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742248_1424 (size=63) 2024-12-02T23:14:21,722 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:21,722 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:21,723 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:21,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T23:14:21,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742249_1425 (size=653) 2024-12-02T23:14:21,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742249_1425 (size=653) 2024-12-02T23:14:21,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742249_1425 (size=653) 2024-12-02T23:14:21,768 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:21,773 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:21,773 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:21,775 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:21,775 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T23:14:21,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 658 msec 2024-12-02T23:14:22,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T23:14:22,252 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T23:14:22,261 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:22,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:22,263 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:22,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-02T23:14:22,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 
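[annotation] The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings are the test loading rows with durability turned off before taking snaptb0-testExportExpiredSnapshot. A hedged sketch of the kind of client write that produces exactly that warning (row key, qualifier and value are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalWrite {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL trades durability for write speed; the region server logs the
          // "Data may be lost in the event of a crash" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }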
2024-12-02T23:14:22,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:22,267 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:22,271 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:22,276 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:22,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T23:14:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181262278 (current time:1733181262278). 2024-12-02T23:14:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T23:14:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:22,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34df9bf3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:22,279 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17805e47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:22,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,281 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:22,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4529948f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:22,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:22,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:22,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:22,283 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:22,284 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
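The entries above show the master's RPC handler opening a short-lived client connection of its own: it fetches the cluster id '681687ad-3859-406f-bb7d-05ef89be9a18' from the connection registry and locates hbase:meta before validating the snapshot request. For orientation only, a minimal client-side sketch of the same connection bootstrap, assuming the standard HBase 2.x+ public API and an hbase-site.xml on the classpath; this is illustrative and not code from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterIdSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Prints the cluster id, the same value the ConnectionRegistry reports in the log above.
          System.out.println(admin.getClusterMetrics().getClusterId());
        }
      }
    }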
2024-12-02T23:14:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,284 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:22,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3937782d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:22,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:22,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:22,285 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:22,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:22,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:22,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5872e892, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:22,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:22,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:22,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,286 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:22,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39c0631a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:22,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:22,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:22,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:22,288 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59794, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:22,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:22,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:22,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44922, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:22,291 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:22,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:22,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:22,291 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:22,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T23:14:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
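At this point the handler has accepted the request { ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, filled in defaults (creation time, TTL 0, VERSION 2, owner jenkins), read the table ACL, and the following lines store SnapshotProcedure pid=177. A hedged sketch of issuing an equivalent FLUSH-type snapshot from a client; the test drives this through its own helpers, so the class and method names below are illustrative only:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      // Assumes an open Admin handle, as in the previous sketch.
      static void takeFlushSnapshot(Admin admin) throws Exception {
        // FLUSH snapshot: each region flushes its memstore first, then the flushed
        // hfiles are referenced by the snapshot manifest (pids 178/179 below).
        admin.snapshot("snaptb0-testExportExpiredSnapshot",
            TableName.valueOf("testtb-testExportExpiredSnapshot"),
            SnapshotType.FLUSH);
      }
    }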
2024-12-02T23:14:22,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T23:14:22,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T23:14:22,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T23:14:22,294 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:22,294 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:22,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742250_1426 (size=165) 2024-12-02T23:14:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742250_1426 (size=165) 2024-12-02T23:14:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742250_1426 (size=165) 2024-12-02T23:14:22,302 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:22,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82}] 2024-12-02T23:14:22,303 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:22,303 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:22,402 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T23:14:22,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-02T23:14:22,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-02T23:14:22,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:22,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:22,455 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing ad49f520e0d675103d5333bf70f6b644 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T23:14:22,455 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 9f38675d8291c59d08c64a8f6a377b82 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T23:14:22,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 is 71, key is 04ee19ddb2ac9f87cb90ecab6d19772b/cf:q/1733181262261/Put/seqid=0 2024-12-02T23:14:22,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 is 71, key is 14aa445f41e2081754ae74845e5ded05/cf:q/1733181262262/Put/seqid=0 2024-12-02T23:14:22,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742251_1427 (size=5171) 2024-12-02T23:14:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742252_1428 (size=8101) 2024-12-02T23:14:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742252_1428 (size=8101) 2024-12-02T23:14:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742251_1427 (size=5171) 2024-12-02T23:14:22,487 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742252_1428 (size=8101) 2024-12-02T23:14:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742251_1427 (size=5171) 2024-12-02T23:14:22,488 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:22,491 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:22,491 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:22,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/.tmp/cf/cc89779099c24f2bb3bfb258261a1c8d, store: [table=testtb-testExportExpiredSnapshot family=cf region=9f38675d8291c59d08c64a8f6a377b82] 2024-12-02T23:14:22,492 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/.tmp/cf/8ba5541674bc42549199d1d6a3bfc170, store: [table=testtb-testExportExpiredSnapshot family=cf region=ad49f520e0d675103d5333bf70f6b644] 2024-12-02T23:14:22,493 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/.tmp/cf/cc89779099c24f2bb3bfb258261a1c8d is 209, key is 15e60ed66c83d4b0bb8975a92737786c4/cf:q/1733181262262/Put/seqid=0 2024-12-02T23:14:22,493 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/.tmp/cf/8ba5541674bc42549199d1d6a3bfc170 is 209, key is 0a9de9b25fbcf0af2649c866a096ba600/cf:q/1733181262261/Put/seqid=0 2024-12-02T23:14:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742253_1429 (size=14792) 2024-12-02T23:14:22,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742253_1429 (size=14792) 2024-12-02T23:14:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742253_1429 (size=14792) 2024-12-02T23:14:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742254_1430 (size=6121) 2024-12-02T23:14:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742254_1430 (size=6121) 2024-12-02T23:14:22,498 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/.tmp/cf/cc89779099c24f2bb3bfb258261a1c8d 2024-12-02T23:14:22,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742254_1430 (size=6121) 2024-12-02T23:14:22,498 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/.tmp/cf/8ba5541674bc42549199d1d6a3bfc170 2024-12-02T23:14:22,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/.tmp/cf/cc89779099c24f2bb3bfb258261a1c8d as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d 2024-12-02T23:14:22,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/.tmp/cf/8ba5541674bc42549199d1d6a3bfc170 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170 
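Pids 178/179 above flush both regions before taking references: MOB cells land under mobdir/ and the remaining cells under each region's cf/ directory. The FLUSH snapshot type triggers these flushes itself; for comparison, an explicit client-side flush is a single Admin call, sketched here under the same assumptions as the earlier snippets:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushSketch {
      // Illustrative only: a FLUSH-type snapshot already forces this flush per region.
      static void flushTable(Admin admin) throws Exception {
        admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
      }
    }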
2024-12-02T23:14:22,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170, entries=4, sequenceid=6, filesize=6.0 K 2024-12-02T23:14:22,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d, entries=46, sequenceid=6, filesize=14.4 K 2024-12-02T23:14:22,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for ad49f520e0d675103d5333bf70f6b644 in 53ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:22,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 9f38675d8291c59d08c64a8f6a377b82 in 53ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 9f38675d8291c59d08c64a8f6a377b82: 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for ad49f520e0d675103d5333bf70f6b644: 2024-12-02T23:14:22,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d] hfiles 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170] hfiles 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742256_1432 (size=110) 2024-12-02T23:14:22,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742255_1431 (size=110) 2024-12-02T23:14:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 
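The snapshot manifest now references one hfile per region. Once the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT further below, the snapshot becomes visible to clients; a hedged way to verify that from the outside, using only listSnapshots(), might look like the following (helper name is hypothetical):

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class SnapshotCheckSketch {
      // Returns true once the named snapshot shows up in the master's snapshot list.
      static boolean snapshotExists(Admin admin, String name) throws Exception {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          if (sd.getName().equals(name)) {
            return true;
          }
        }
        return false;
      }
    }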
2024-12-02T23:14:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742255_1431 (size=110) 2024-12-02T23:14:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-02T23:14:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742255_1431 (size=110) 2024-12-02T23:14:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742256_1432 (size=110) 2024-12-02T23:14:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:22,519 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-02T23:14:22,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742256_1432 (size=110) 2024-12-02T23:14:22,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-02T23:14:22,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-02T23:14:22,519 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:22,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:22,519 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:22,520 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:22,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ad49f520e0d675103d5333bf70f6b644 in 219 msec 2024-12-02T23:14:22,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-02T23:14:22,524 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:22,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9f38675d8291c59d08c64a8f6a377b82 in 219 msec 2024-12-02T23:14:22,525 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:22,526 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T23:14:22,526 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:22,526 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:22,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644] hfiles 2024-12-02T23:14:22,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:22,527 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:22,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742257_1433 (size=294) 2024-12-02T23:14:22,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742257_1433 (size=294) 2024-12-02T23:14:22,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742257_1433 (size=294) 2024-12-02T23:14:22,533 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:22,533 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,534 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742258_1434 (size=963) 2024-12-02T23:14:22,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742258_1434 (size=963) 2024-12-02T23:14:22,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742258_1434 (size=963) 2024-12-02T23:14:22,546 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:22,550 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:22,551 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:22,552 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:22,552 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T23:14:22,553 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 260 msec 2024-12-02T23:14:22,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T23:14:22,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T23:14:22,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:14:22,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-02T23:14:22,615 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:14:22,615 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-02T23:14:22,615 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:14:22,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T23:14:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742259_1435 (size=436) 2024-12-02T23:14:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742259_1435 (size=436) 2024-12-02T23:14:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742259_1435 (size=436) 2024-12-02T23:14:22,623 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 74f0c13379d206ce1601bf0b945b9ba8, NAME => 'testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:22,623 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 53b6b7fd5134f3f13f85c7ab43d0a24d, NAME => 'testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742261_1437 (size=61) 2024-12-02T23:14:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742261_1437 (size=61) 2024-12-02T23:14:22,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742260_1436 (size=61) 2024-12-02T23:14:22,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742260_1436 (size=61) 2024-12-02T23:14:22,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742261_1437 (size=61) 2024-12-02T23:14:22,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742260_1436 (size=61) 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 74f0c13379d206ce1601bf0b945b9ba8, disabling compactions & flushes 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 53b6b7fd5134f3f13f85c7ab43d0a24d, disabling compactions & flushes 2024-12-02T23:14:22,631 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,631 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. after waiting 0 ms 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 
after waiting 0 ms 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,631 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,631 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 74f0c13379d206ce1601bf0b945b9ba8: Waiting for close lock at 1733181262631Disabling compacts and flushes for region at 1733181262631Disabling writes for close at 1733181262631Writing region close event to WAL at 1733181262631Closed at 1733181262631 2024-12-02T23:14:22,631 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 53b6b7fd5134f3f13f85c7ab43d0a24d: Waiting for close lock at 1733181262631Disabling compacts and flushes for region at 1733181262631Disabling writes for close at 1733181262631Writing region close event to WAL at 1733181262631Closed at 1733181262631 2024-12-02T23:14:22,632 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:14:22,632 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733181262632"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181262632"}]},"ts":"1733181262632"} 2024-12-02T23:14:22,632 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733181262632"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181262632"}]},"ts":"1733181262632"} 2024-12-02T23:14:22,634 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
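The create request at 23:14:22,613 builds 'testExportExpiredSnapshot' with a single 'cf' family, IS_MOB => 'true', MOB_THRESHOLD => '0' and VERSIONS => '1', pre-split at '1' into the two regions just added to meta. A hedged equivalent with the TableDescriptorBuilder API, using only the attributes visible in the log and leaving everything else at defaults; this is a sketch, not the test's own setup code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      static void createMobTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)   // IS_MOB => 'true'
                .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is written as a MOB
                .setMaxVersions(1)     // VERSIONS => '1'
                .build())
            .build();
        // The split key '1' yields the two regions seen above: ('', '1') and ('1', '').
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }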
2024-12-02T23:14:22,634 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:14:22,635 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181262634"}]},"ts":"1733181262634"} 2024-12-02T23:14:22,636 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T23:14:22,636 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:14:22,637 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:14:22,637 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:14:22,637 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:14:22,637 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:14:22,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=74f0c13379d206ce1601bf0b945b9ba8, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=53b6b7fd5134f3f13f85c7ab43d0a24d, ASSIGN}] 2024-12-02T23:14:22,638 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=53b6b7fd5134f3f13f85c7ab43d0a24d, ASSIGN 2024-12-02T23:14:22,638 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=74f0c13379d206ce1601bf0b945b9ba8, ASSIGN 2024-12-02T23:14:22,639 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=74f0c13379d206ce1601bf0b945b9ba8, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:14:22,639 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=53b6b7fd5134f3f13f85c7ab43d0a24d, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:14:22,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T23:14:22,789 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T23:14:22,790 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=53b6b7fd5134f3f13f85c7ab43d0a24d, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:22,790 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=74f0c13379d206ce1601bf0b945b9ba8, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:22,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=74f0c13379d206ce1601bf0b945b9ba8, ASSIGN because future has completed 2024-12-02T23:14:22,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:14:22,792 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=53b6b7fd5134f3f13f85c7ab43d0a24d, ASSIGN because future has completed 2024-12-02T23:14:22,793 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:14:22,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T23:14:22,947 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,947 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 74f0c13379d206ce1601bf0b945b9ba8, NAME => 'testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. service=AccessControlService 2024-12-02T23:14:22,948 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 
2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 53b6b7fd5134f3f13f85c7ab43d0a24d, NAME => 'testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:14:22,948 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. service=AccessControlService 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,948 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,948 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,950 INFO [StoreOpener-74f0c13379d206ce1601bf0b945b9ba8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 74f0c13379d206ce1601bf0b945b9ba8 
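The CreateTableProcedure above (pid=180) is assigning and opening the two regions of testExportExpiredSnapshot, each with a single column family 'cf'; the family settings appear a few entries below in the region-open journal (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0'). As a rough client-side sketch of how such a pre-split MOB table could be created — not part of this test run; the class name, main wrapper and configuration are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportExpiredSnapshot");
          // Column family 'cf' with the settings visible in the region-open journal below:
          // VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0'.
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .setMobEnabled(true)
                  .setMobThreshold(0)
                  .build())
              .build();
          // A single split key '1' yields the two regions seen in this log: [, '1') and ['1', ).
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }
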
2024-12-02T23:14:22,950 INFO [StoreOpener-53b6b7fd5134f3f13f85c7ab43d0a24d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,951 INFO [StoreOpener-74f0c13379d206ce1601bf0b945b9ba8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 74f0c13379d206ce1601bf0b945b9ba8 columnFamilyName cf 2024-12-02T23:14:22,951 INFO [StoreOpener-53b6b7fd5134f3f13f85c7ab43d0a24d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 53b6b7fd5134f3f13f85c7ab43d0a24d columnFamilyName cf 2024-12-02T23:14:22,952 DEBUG [StoreOpener-53b6b7fd5134f3f13f85c7ab43d0a24d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:22,952 DEBUG [StoreOpener-74f0c13379d206ce1601bf0b945b9ba8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:22,952 INFO [StoreOpener-74f0c13379d206ce1601bf0b945b9ba8-1 {}] regionserver.HStore(327): Store=74f0c13379d206ce1601bf0b945b9ba8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:22,952 INFO [StoreOpener-53b6b7fd5134f3f13f85c7ab43d0a24d-1 {}] regionserver.HStore(327): Store=53b6b7fd5134f3f13f85c7ab43d0a24d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:22,953 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,953 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,953 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): 
Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,953 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,954 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,955 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,955 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,957 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:22,957 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:22,957 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 74f0c13379d206ce1601bf0b945b9ba8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65045642, jitterRate=-0.03074440360069275}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:22,957 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:22,957 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 53b6b7fd5134f3f13f85c7ab43d0a24d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63215526, jitterRate=-0.058015257120132446}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:22,957 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:22,958 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 74f0c13379d206ce1601bf0b945b9ba8: Running coprocessor pre-open hook at 1733181262948Writing region info on filesystem at 1733181262949 (+1 ms)Initializing all the Stores at 1733181262949Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181262949Cleaning up temporary data from old regions at 1733181262954 (+5 ms)Running coprocessor post-open hooks at 1733181262957 (+3 ms)Region opened successfully at 1733181262958 (+1 ms) 2024-12-02T23:14:22,958 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 53b6b7fd5134f3f13f85c7ab43d0a24d: Running coprocessor pre-open hook at 1733181262948Writing region info on filesystem at 1733181262949 (+1 ms)Initializing all the Stores at 1733181262949Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181262949Cleaning up temporary data from old regions at 1733181262954 (+5 ms)Running coprocessor post-open hooks at 1733181262957 (+3 ms)Region opened successfully at 1733181262958 (+1 ms) 2024-12-02T23:14:22,959 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8., pid=183, masterSystemTime=1733181262944 2024-12-02T23:14:22,959 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d., pid=184, masterSystemTime=1733181262945 2024-12-02T23:14:22,960 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy 
task for testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,960 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:22,961 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=53b6b7fd5134f3f13f85c7ab43d0a24d, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:22,961 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,961 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:22,962 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=74f0c13379d206ce1601bf0b945b9ba8, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:22,962 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:14:22,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:14:22,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=182 2024-12-02T23:14:22,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d, server=9e5ab3b623d7,38049,1733180945779 in 171 msec 2024-12-02T23:14:22,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=181 2024-12-02T23:14:22,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=53b6b7fd5134f3f13f85c7ab43d0a24d, ASSIGN in 329 msec 2024-12-02T23:14:22,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8, server=9e5ab3b623d7,39847,1733180945622 in 173 msec 2024-12-02T23:14:22,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-02T23:14:22,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=74f0c13379d206ce1601bf0b945b9ba8, ASSIGN in 331 msec 2024-12-02T23:14:22,970 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute 
state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:14:22,971 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181262970"}]},"ts":"1733181262970"} 2024-12-02T23:14:22,972 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T23:14:22,972 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:14:22,973 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T23:14:22,975 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T23:14:22,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:22,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:22,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:22,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,981 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:22,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 367 msec 2024-12-02T23:14:23,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T23:14:23,242 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T23:14:23,242 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,245 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-02T23:14:23,245 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:23,245 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:23,246 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,250 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,254 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:23,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. with WAL disabled. Data may be lost in the event of a crash. 
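The two RpcServer handler entries just above warn that rows are being written to both regions with the WAL disabled. On the client side that corresponds to puts issued with SKIP_WAL durability; a minimal sketch follows (the row key, qualifier and value are made up for illustration, and 'conn' is assumed to be an already-open Connection as in the earlier sketch):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      // 'conn' is an already-open HBase Connection (see the table-creation sketch above).
      static void writeRowSkippingWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // illustrative row key, not taken from the log
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the "Data may be lost in the event of a crash" warning.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
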
2024-12-02T23:14:23,264 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-02T23:14:23,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:23,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:23,267 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,270 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T23:14:23,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T23:14:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T23:14:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e32fb9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:23,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:23,276 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:23,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:23,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:23,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba66161, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): 
Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:23,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:23,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,277 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:23,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@335d07fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:23,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:23,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:23,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:23,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,280 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29b0f8f9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:23,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:23,281 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d745469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:23,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,283 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:23,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbf1775, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:23,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:23,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:23,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:23,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:23,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:23,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:23,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:23,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:23,287 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:23,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T23:14:23,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
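At this point the master has accepted a snapshot request of type=FLUSH with ttl=10 for testExportExpiredSnapshot and is about to store the SnapshotProcedure. A hedged sketch of issuing such a request from a client: the plain Admin.snapshot(name, table) overload is standard, while the properties-map SnapshotDescription constructor and the "TTL" property key are assumptions based on recent HBase client releases with snapshot-TTL support.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotWithTtlSketch {
      static void takeSnapshot(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testExportExpiredSnapshot");

        // Simplest form, no TTL (this overload is part of the public Admin API):
        // admin.snapshot("snapshot-testExportExpiredSnapshot", tn);

        // With a TTL, matching the "type=FLUSH ttl=10" request in the log. The properties-map
        // constructor and the "TTL" key are assumptions for clients with snapshot-TTL support.
        Map<String, Object> props = new HashMap<>();
        props.put("TTL", 10L);  // 10 seconds, as in the request above
        admin.snapshot(new SnapshotDescription(
            "snapshot-testExportExpiredSnapshot", tn, SnapshotType.FLUSH, null, -1, -1, props));
      }
    }
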
2024-12-02T23:14:23,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T23:14:23,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T23:14:23,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T23:14:23,289 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:23,290 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:23,292 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:23,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742262_1438 (size=152) 2024-12-02T23:14:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742262_1438 (size=152) 2024-12-02T23:14:23,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742262_1438 (size=152) 2024-12-02T23:14:23,298 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:23,298 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d}] 2024-12-02T23:14:23,299 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:23,299 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:23,392 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T23:14:23,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-02T23:14:23,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-02T23:14:23,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:23,450 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:14:23,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 74f0c13379d206ce1601bf0b945b9ba8 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T23:14:23,451 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 53b6b7fd5134f3f13f85c7ab43d0a24d 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T23:14:23,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ee202cd2738b44d982b847e60e119c46_74f0c13379d206ce1601bf0b945b9ba8 is 71, key is 047343be690daefdc61eadef0d6656a8/cf:q/1733181263260/Put/seqid=0 2024-12-02T23:14:23,468 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202fe879b1feaca4cea897a6053ba895037_53b6b7fd5134f3f13f85c7ab43d0a24d is 71, key is 123e1e1676b22e521fc4fe2ef2654bb8/cf:q/1733181263263/Put/seqid=0 2024-12-02T23:14:23,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742263_1439 (size=5102) 2024-12-02T23:14:23,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742263_1439 (size=5102) 2024-12-02T23:14:23,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742263_1439 (size=5102) 2024-12-02T23:14:23,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742264_1440 (size=8171) 2024-12-02T23:14:23,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:23,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742264_1440 (size=8171) 2024-12-02T23:14:23,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742264_1440 (size=8171) 2024-12-02T23:14:23,474 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:23,477 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202ee202cd2738b44d982b847e60e119c46_74f0c13379d206ce1601bf0b945b9ba8 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241202ee202cd2738b44d982b847e60e119c46_74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:23,478 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202fe879b1feaca4cea897a6053ba895037_53b6b7fd5134f3f13f85c7ab43d0a24d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202fe879b1feaca4cea897a6053ba895037_53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:23,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/.tmp/cf/7036d235ff6a40bd94ee3926d68d12ce, store: [table=testExportExpiredSnapshot family=cf region=74f0c13379d206ce1601bf0b945b9ba8] 2024-12-02T23:14:23,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/.tmp/cf/7036d235ff6a40bd94ee3926d68d12ce is 202, key is 016485864bd2c2ac20784ac8161a30e95/cf:q/1733181263260/Put/seqid=0 2024-12-02T23:14:23,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/.tmp/cf/ad7115c1d0ef48a4bf09f21f22aa0097, store: [table=testExportExpiredSnapshot family=cf region=53b6b7fd5134f3f13f85c7ab43d0a24d] 2024-12-02T23:14:23,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, 
pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/.tmp/cf/ad7115c1d0ef48a4bf09f21f22aa0097 is 202, key is 1a6566d16917fd5e7953aee3174a08196/cf:q/1733181263263/Put/seqid=0 2024-12-02T23:14:23,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742265_1441 (size=5890) 2024-12-02T23:14:23,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742265_1441 (size=5890) 2024-12-02T23:14:23,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742265_1441 (size=5890) 2024-12-02T23:14:23,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742266_1442 (size=14661) 2024-12-02T23:14:23,485 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/.tmp/cf/7036d235ff6a40bd94ee3926d68d12ce 2024-12-02T23:14:23,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742266_1442 (size=14661) 2024-12-02T23:14:23,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742266_1442 (size=14661) 2024-12-02T23:14:23,485 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/.tmp/cf/ad7115c1d0ef48a4bf09f21f22aa0097 2024-12-02T23:14:23,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/.tmp/cf/7036d235ff6a40bd94ee3926d68d12ce as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/cf/7036d235ff6a40bd94ee3926d68d12ce 2024-12-02T23:14:23,489 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/.tmp/cf/ad7115c1d0ef48a4bf09f21f22aa0097 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/cf/ad7115c1d0ef48a4bf09f21f22aa0097 2024-12-02T23:14:23,493 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/cf/7036d235ff6a40bd94ee3926d68d12ce, entries=3, sequenceid=5, filesize=5.8 K 2024-12-02T23:14:23,494 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/cf/ad7115c1d0ef48a4bf09f21f22aa0097, entries=47, sequenceid=5, filesize=14.3 K 2024-12-02T23:14:23,494 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 74f0c13379d206ce1601bf0b945b9ba8 in 44ms, sequenceid=5, compaction requested=false 2024-12-02T23:14:23,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T23:14:23,494 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 53b6b7fd5134f3f13f85c7ab43d0a24d in 44ms, sequenceid=5, compaction requested=false 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 74f0c13379d206ce1601bf0b945b9ba8: 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 53b6b7fd5134f3f13f85c7ab43d0a24d: 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/cf/7036d235ff6a40bd94ee3926d68d12ce] hfiles 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/cf/ad7115c1d0ef48a4bf09f21f22aa0097] hfiles 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/cf/7036d235ff6a40bd94ee3926d68d12ce for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/cf/ad7115c1d0ef48a4bf09f21f22aa0097 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742267_1443 (size=103) 2024-12-02T23:14:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742268_1444 (size=103) 2024-12-02T23:14:23,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742268_1444 (size=103) 2024-12-02T23:14:23,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742267_1443 (size=103) 2024-12-02T23:14:23,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742267_1443 (size=103) 2024-12-02T23:14:23,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 
2024-12-02T23:14:23,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-02T23:14:23,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742268_1444 (size=103) 2024-12-02T23:14:23,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:14:23,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-02T23:14:23,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-02T23:14:23,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:23,502 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:23,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-02T23:14:23,502 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:23,502 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:23,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 74f0c13379d206ce1601bf0b945b9ba8 in 204 msec 2024-12-02T23:14:23,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-12-02T23:14:23,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 53b6b7fd5134f3f13f85c7ab43d0a24d in 204 msec 2024-12-02T23:14:23,504 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:23,505 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:23,506 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
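
The entries above show the two SnapshotRegionProcedure children (pid=186, pid=187) flushing their stores and registering the flushed hfiles in the manifest of snapshot-testExportExpiredSnapshot, whose description carries type=FLUSH and ttl=10. For reference, a minimal client-side sketch of requesting such a short-TTL FLUSH snapshot; the Map-based SnapshotDescription constructor and the "TTL" property key are assumptions about the client API rather than something shown in this log.

```java
// Minimal sketch (not this log's test code): request a FLUSH-type snapshot with a
// short TTL, mirroring the { ss=snapshot-testExportExpiredSnapshot ... ttl=10 }
// description above. The Map-based SnapshotDescription constructor and the "TTL"
// property key are assumed features of the client API in use.
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeShortTtlSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // TTL is given in seconds; 10 matches the ttl=10 seen in the log above.
      Map<String, Object> props = Map.of("TTL", 10L); // assumed property key
      SnapshotDescription desc = new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH,
          props);
      admin.snapshot(desc); // blocks until the master-side SnapshotProcedure finishes
    }
  }
}
```
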
2024-12-02T23:14:23,506 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:23,506 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:23,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202fe879b1feaca4cea897a6053ba895037_53b6b7fd5134f3f13f85c7ab43d0a24d, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241202ee202cd2738b44d982b847e60e119c46_74f0c13379d206ce1601bf0b945b9ba8] hfiles 2024-12-02T23:14:23,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241202fe879b1feaca4cea897a6053ba895037_53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:14:23,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241202ee202cd2738b44d982b847e60e119c46_74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:14:23,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742269_1445 (size=287) 2024-12-02T23:14:23,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742269_1445 (size=287) 2024-12-02T23:14:23,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742269_1445 (size=287) 2024-12-02T23:14:23,513 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:23,513 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,513 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742270_1446 (size=935) 2024-12-02T23:14:23,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742270_1446 (size=935) 2024-12-02T23:14:23,520 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742270_1446 (size=935) 2024-12-02T23:14:23,522 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:23,527 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:23,527 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-02T23:14:23,528 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:23,528 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T23:14:23,529 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 240 msec 2024-12-02T23:14:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T23:14:23,602 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T23:14:23,841 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0007_000001 (auth:SIMPLE) from 127.0.0.1:46812 2024-12-02T23:14:23,852 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000001/launch_container.sh] 2024-12-02T23:14:23,853 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000001/container_tokens] 2024-12-02T23:14:23,853 WARN 
[ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0007/container_1733180955226_0007_01_000001/sysfs] 2024-12-02T23:14:25,214 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:25,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-02T23:14:25,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T23:14:25,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T23:14:25,254 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T23:14:25,254 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T23:14:25,255 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T23:14:30,757 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:33,599 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
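
Before the export attempt that follows, a client can check whether a snapshot's TTL has already lapsed. A rough sketch, assuming the getTtl()/getCreationTime() accessors on the client-side SnapshotDescription (TTL in seconds, creation time in milliseconds, with TTL <= 0 treated as "never expires"):

```java
// Sketch: list snapshots and flag the ones whose TTL has elapsed. The
// getTtl()/getCreationTime() accessors are assumed to exist on the client-side
// SnapshotDescription in this client version.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListExpiredSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      long now = System.currentTimeMillis();
      for (SnapshotDescription s : admin.listSnapshots()) {
        boolean expired = s.getTtl() > 0
            && s.getCreationTime() + s.getTtl() * 1000L < now;
        System.out.printf("%s ttl=%ds expired=%b%n",
            s.getName(), s.getTtl(), expired);
      }
    }
  }
}
```
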
2024-12-02T23:14:33,608 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181273608
2024-12-02T23:14:33,609 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181273608, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181273608, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156
2024-12-02T23:14:33,657 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156
2024-12-02T23:14:33,657 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181273608, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181273608/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-02T23:14:33,663 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-12-02T23:14:33,665 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
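
The stack trace above shows the failure coming from ExportSnapshot.verifySnapshot, which rejects a snapshot whose TTL has lapsed before any copy work begins. For reference, a sketch of driving the same tool through ToolRunner, as the test harness does per the trace; the --copy-to destination is illustrative only, and the command-line equivalent is `hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot --snapshot <name> --copy-to <hdfs-url>`.

```java
// Sketch: invoke the ExportSnapshot tool programmatically via ToolRunner, the
// same call path visible in the stack trace above. The --copy-to URL below is
// illustrative, not this test's real destination.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunSnapshotExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snapshot-testExportExpiredSnapshot",
        "--copy-to", "hdfs://namenode:8020/hbase-export" // illustrative target
    });
    // AbstractHBaseTool logs "Error running command-line tool" and returns a
    // non-zero code when verifySnapshot throws SnapshotTTLExpiredException.
    System.exit(rc);
  }
}
```
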
2024-12-02T23:14:33,666 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportExpiredSnapshot 2024-12-02T23:14:33,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:33,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T23:14:33,684 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181273684"}]},"ts":"1733181273684"} 2024-12-02T23:14:33,692 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-02T23:14:33,692 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-02T23:14:33,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-02T23:14:33,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, UNASSIGN}] 2024-12-02T23:14:33,699 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, UNASSIGN 2024-12-02T23:14:33,700 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=9f38675d8291c59d08c64a8f6a377b82, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:14:33,701 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, UNASSIGN 2024-12-02T23:14:33,704 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=ad49f520e0d675103d5333bf70f6b644, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:33,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, UNASSIGN because future has completed 2024-12-02T23:14:33,709 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:33,709 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:14:33,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, UNASSIGN because future has completed 2024-12-02T23:14:33,712 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:33,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:33,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T23:14:33,878 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:33,878 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:33,878 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 9f38675d8291c59d08c64a8f6a377b82, disabling compactions & flushes 2024-12-02T23:14:33,879 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:33,879 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 2024-12-02T23:14:33,879 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. after waiting 0 ms 2024-12-02T23:14:33,879 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 
2024-12-02T23:14:33,884 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:33,884 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:33,884 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing ad49f520e0d675103d5333bf70f6b644, disabling compactions & flushes 2024-12-02T23:14:33,884 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:33,884 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:33,884 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. after waiting 0 ms 2024-12-02T23:14:33,884 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 2024-12-02T23:14:33,915 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:33,917 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:33,917 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644. 
2024-12-02T23:14:33,917 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for ad49f520e0d675103d5333bf70f6b644: Waiting for close lock at 1733181273884Running coprocessor pre-close hooks at 1733181273884Disabling compacts and flushes for region at 1733181273884Disabling writes for close at 1733181273884Writing region close event to WAL at 1733181273901 (+17 ms)Running coprocessor post-close hooks at 1733181273917 (+16 ms)Closed at 1733181273917 2024-12-02T23:14:33,922 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:33,926 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=ad49f520e0d675103d5333bf70f6b644, regionState=CLOSED 2024-12-02T23:14:33,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:33,932 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:33,934 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:33,934 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82. 
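
The DisableTableProcedure (pid=188) fans out into a CloseTableRegionsProcedure and per-region TransitRegionStateProcedure/CloseRegionProcedure children, whose region close handlers are logged above. From the client side, that whole procedure tree is driven by a single blocking Admin call, roughly:

```java
// Sketch of the client call behind the DisableTableProcedure tree above:
// Admin.disableTable blocks until the master-side procedure finishes and the
// table reaches state=DISABLED in hbase:meta.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        admin.disableTable(table); // waits for all regions to close
      }
    }
  }
}
```
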
2024-12-02T23:14:33,934 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 9f38675d8291c59d08c64a8f6a377b82: Waiting for close lock at 1733181273878Running coprocessor pre-close hooks at 1733181273878Disabling compacts and flushes for region at 1733181273878Disabling writes for close at 1733181273879 (+1 ms)Writing region close event to WAL at 1733181273901 (+22 ms)Running coprocessor post-close hooks at 1733181273933 (+32 ms)Closed at 1733181273934 (+1 ms) 2024-12-02T23:14:33,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-12-02T23:14:33,935 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure ad49f520e0d675103d5333bf70f6b644, server=9e5ab3b623d7,41311,1733180945828 in 219 msec 2024-12-02T23:14:33,937 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:33,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=ad49f520e0d675103d5333bf70f6b644, UNASSIGN in 239 msec 2024-12-02T23:14:33,938 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=9f38675d8291c59d08c64a8f6a377b82, regionState=CLOSED 2024-12-02T23:14:33,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:14:33,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-02T23:14:33,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 9f38675d8291c59d08c64a8f6a377b82, server=9e5ab3b623d7,39847,1733180945622 in 238 msec 2024-12-02T23:14:33,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-12-02T23:14:33,952 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=9f38675d8291c59d08c64a8f6a377b82, UNASSIGN in 254 msec 2024-12-02T23:14:33,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-02T23:14:33,956 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 260 msec 2024-12-02T23:14:33,958 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181273957"}]},"ts":"1733181273957"} 2024-12-02T23:14:33,965 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-02T23:14:33,965 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set 
testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-02T23:14:33,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 299 msec 2024-12-02T23:14:34,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T23:14:34,002 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T23:14:34,008 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,013 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,015 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,025 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,029 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:34,032 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,048 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/recovered.edits] 2024-12-02T23:14:34,049 DEBUG [HFileArchiver-21 {}] 
backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/recovered.edits] 2024-12-02T23:14:34,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,053 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T23:14:34,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:34,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:34,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-02T23:14:34,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-02T23:14:34,058 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:14:34,058 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-02T23:14:34,058 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:14:34,059 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-02T23:14:34,059 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:14:34,061 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:34,062 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:34,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:34,067 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:34,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:34,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:34,089 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/cf/cc89779099c24f2bb3bfb258261a1c8d 2024-12-02T23:14:34,089 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/cf/8ba5541674bc42549199d1d6a3bfc170 2024-12-02T23:14:34,104 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82/recovered.edits/9.seqid 2024-12-02T23:14:34,105 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:34,107 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644/recovered.edits/9.seqid 2024-12-02T23:14:34,108 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): 
Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportExpiredSnapshot/ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:34,108 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-02T23:14:34,108 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-02T23:14:34,109 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-02T23:14:34,115 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241202186d93f50b47476c8318302c3831dac3_9f38675d8291c59d08c64a8f6a377b82 2024-12-02T23:14:34,117 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241202a5283eb847ba418eb8a57733051acd04_ad49f520e0d675103d5333bf70f6b644 2024-12-02T23:14:34,117 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-02T23:14:34,120 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,123 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-02T23:14:34,129 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-02T23:14:34,131 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,131 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
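
With the region and MOB files archived, the table itself and the test's snapshots are removed; client-side this amounts to a deleteTable call plus deleteSnapshot calls, sketched below with snapshot names matching the delete requests logged just after this point.

```java
// Sketch of the cleanup calls reflected in the DeleteTableProcedure above and
// the "delete name: ..." snapshot requests that follow in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CleanupExpiredSnapshotTest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The table must already be disabled before deleteTable is accepted.
      admin.deleteTable(TableName.valueOf("testtb-testExportExpiredSnapshot"));
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}
```
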
2024-12-02T23:14:34,131 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181274131"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:34,131 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181274131"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:34,134 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:14:34,134 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ad49f520e0d675103d5333bf70f6b644, NAME => 'testtb-testExportExpiredSnapshot,,1733181260456.ad49f520e0d675103d5333bf70f6b644.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 9f38675d8291c59d08c64a8f6a377b82, NAME => 'testtb-testExportExpiredSnapshot,1,1733181260456.9f38675d8291c59d08c64a8f6a377b82.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:14:34,134 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-02T23:14:34,134 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181274134"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:34,136 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-02T23:14:34,138 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,139 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 130 msec 2024-12-02T23:14:34,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-02T23:14:34,169 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-02T23:14:34,169 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T23:14:34,254 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T23:14:34,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-02T23:14:34,278 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-12-02T23:14:34,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-02T23:14:34,293 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T23:14:34,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-02T23:14:34,353 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=802 (was 810), OpenFileDescriptor=777 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1001 (was 1039), ProcessCount=16 (was 16), AvailableMemoryMB=2115 (was 1386) - AvailableMemoryMB LEAK? - 2024-12-02T23:14:34,353 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-02T23:14:34,390 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=802, OpenFileDescriptor=777, MaxFileDescriptor=1048576, SystemLoadAverage=1001, ProcessCount=16, AvailableMemoryMB=2115 2024-12-02T23:14:34,390 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-12-02T23:14:34,394 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:14:34,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:34,400 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:14:34,400 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-02T23:14:34,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T23:14:34,402 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:14:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742271_1447 (size=448) 2024-12-02T23:14:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742271_1447 (size=448) 2024-12-02T23:14:34,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to 
blk_1073742271_1447 (size=448) 2024-12-02T23:14:34,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T23:14:34,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T23:14:34,869 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4f2b7b3c8280b03935fda802a9eb2d2e, NAME => 'testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:34,870 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b34bb32e23f8345a23dcec225714af08, NAME => 'testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:34,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742272_1448 (size=73) 2024-12-02T23:14:34,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742272_1448 (size=73) 2024-12-02T23:14:34,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742272_1448 (size=73) 2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 4f2b7b3c8280b03935fda802a9eb2d2e, disabling compactions & flushes 2024-12-02T23:14:34,895 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 
2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. after waiting 0 ms 2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:34,895 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:34,895 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4f2b7b3c8280b03935fda802a9eb2d2e: Waiting for close lock at 1733181274895Disabling compacts and flushes for region at 1733181274895Disabling writes for close at 1733181274895Writing region close event to WAL at 1733181274895Closed at 1733181274895 2024-12-02T23:14:34,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742273_1449 (size=73) 2024-12-02T23:14:34,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742273_1449 (size=73) 2024-12-02T23:14:34,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742273_1449 (size=73) 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing b34bb32e23f8345a23dcec225714af08, disabling compactions & flushes 2024-12-02T23:14:34,899 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. after waiting 0 ms 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 
2024-12-02T23:14:34,899 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:34,899 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for b34bb32e23f8345a23dcec225714af08: Waiting for close lock at 1733181274899Disabling compacts and flushes for region at 1733181274899Disabling writes for close at 1733181274899Writing region close event to WAL at 1733181274899Closed at 1733181274899 2024-12-02T23:14:34,900 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:14:34,900 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733181274900"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181274900"}]},"ts":"1733181274900"} 2024-12-02T23:14:34,900 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733181274900"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181274900"}]},"ts":"1733181274900"} 2024-12-02T23:14:34,902 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:14:34,903 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:14:34,904 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181274903"}]},"ts":"1733181274903"} 2024-12-02T23:14:34,906 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T23:14:34,906 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:14:34,907 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:14:34,907 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:14:34,907 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:14:34,907 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): 
Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:14:34,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, ASSIGN}] 2024-12-02T23:14:34,909 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, ASSIGN 2024-12-02T23:14:34,909 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, ASSIGN 2024-12-02T23:14:34,910 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:14:34,910 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:14:35,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T23:14:35,060 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
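The entries above show the master running CreateTableProcedure pid=195 for 'testtb-testEmptyExportFileSystemState': writing the filesystem layout, adding the two regions to hbase:meta, and spawning the two ASSIGN subprocedures. For orientation only, here is a minimal client-side sketch of the request that produces this sequence, assuming the standard HBase 2.x Admin API; the class name and the splitKeys variable are illustrative and not taken from the test source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      // Single column family 'cf' with MOB enabled and threshold 0, mirroring
      // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' in the logged descriptor.
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1);
      TableDescriptorBuilder td =
          TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf.build());
      // One split key '1' yields the two regions seen in the log:
      // ['' , '1') and ['1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}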
2024-12-02T23:14:35,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=b34bb32e23f8345a23dcec225714af08, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:35,061 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=4f2b7b3c8280b03935fda802a9eb2d2e, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:35,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, ASSIGN because future has completed 2024-12-02T23:14:35,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:35,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, ASSIGN because future has completed 2024-12-02T23:14:35,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:14:35,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T23:14:35,220 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:35,220 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => b34bb32e23f8345a23dcec225714af08, NAME => 'testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:14:35,220 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. service=AccessControlService 2024-12-02T23:14:35,221 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:14:35,221 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,221 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:35,221 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,221 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,226 INFO [StoreOpener-b34bb32e23f8345a23dcec225714af08-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,228 INFO [StoreOpener-b34bb32e23f8345a23dcec225714af08-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b34bb32e23f8345a23dcec225714af08 columnFamilyName cf 2024-12-02T23:14:35,229 DEBUG [StoreOpener-b34bb32e23f8345a23dcec225714af08-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:35,230 INFO [StoreOpener-b34bb32e23f8345a23dcec225714af08-1 {}] regionserver.HStore(327): Store=b34bb32e23f8345a23dcec225714af08/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:35,230 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,231 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,231 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,232 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,232 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,233 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,240 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:35,240 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => 4f2b7b3c8280b03935fda802a9eb2d2e, NAME => 'testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:14:35,240 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. service=AccessControlService 2024-12-02T23:14:35,240 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:14:35,240 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,240 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:35,241 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,241 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,242 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:35,242 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened b34bb32e23f8345a23dcec225714af08; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66498713, jitterRate=-0.009091958403587341}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:35,242 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,243 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for b34bb32e23f8345a23dcec225714af08: Running coprocessor pre-open hook at 1733181275221Writing region info on filesystem at 1733181275221Initializing all the Stores at 1733181275222 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181275222Cleaning up temporary data from old regions at 1733181275232 (+10 ms)Running coprocessor post-open hooks at 1733181275242 (+10 ms)Region opened successfully at 1733181275243 (+1 ms) 2024-12-02T23:14:35,244 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08., pid=198, masterSystemTime=1733181275215 2024-12-02T23:14:35,245 INFO [StoreOpener-4f2b7b3c8280b03935fda802a9eb2d2e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,246 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:35,246 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:35,248 INFO [StoreOpener-4f2b7b3c8280b03935fda802a9eb2d2e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f2b7b3c8280b03935fda802a9eb2d2e columnFamilyName cf 2024-12-02T23:14:35,248 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=b34bb32e23f8345a23dcec225714af08, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:35,248 DEBUG [StoreOpener-4f2b7b3c8280b03935fda802a9eb2d2e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:35,249 INFO [StoreOpener-4f2b7b3c8280b03935fda802a9eb2d2e-1 {}] regionserver.HStore(327): Store=4f2b7b3c8280b03935fda802a9eb2d2e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:35,250 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,250 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,251 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,251 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,251 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,253 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,255 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:35,255 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened 4f2b7b3c8280b03935fda802a9eb2d2e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71628116, jitterRate=0.06734210252761841}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:35,255 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,255 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for 4f2b7b3c8280b03935fda802a9eb2d2e: Running coprocessor pre-open hook at 1733181275241Writing region info on filesystem at 1733181275241Initializing all the Stores at 1733181275242 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181275242Cleaning up temporary data from old regions at 1733181275251 (+9 ms)Running coprocessor post-open hooks at 1733181275255 (+4 ms)Region opened successfully at 1733181275255 2024-12-02T23:14:35,256 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e., pid=199, masterSystemTime=1733181275217 2024-12-02T23:14:35,258 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:35,261 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:35,261 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 
2024-12-02T23:14:35,262 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=4f2b7b3c8280b03935fda802a9eb2d2e, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:35,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:14:35,268 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-02T23:14:35,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828 in 199 msec 2024-12-02T23:14:35,270 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, ASSIGN in 361 msec 2024-12-02T23:14:35,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-02T23:14:35,271 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779 in 204 msec 2024-12-02T23:14:35,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-02T23:14:35,272 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, ASSIGN in 363 msec 2024-12-02T23:14:35,273 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:14:35,273 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181275273"}]},"ts":"1733181275273"} 2024-12-02T23:14:35,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T23:14:35,275 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:14:35,275 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-02T23:14:35,277 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:14:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-12-02T23:14:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:35,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,282 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:35,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 889 msec 2024-12-02T23:14:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T23:14:35,542 INFO 
[RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T23:14:35,542 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,544 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-02T23:14:35,544 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:35,545 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:35,546 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,550 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,554 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,556 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:14:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181275556 (current time:1733181275556). 
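At this point the CREATE operation has completed and the client issues the snapshot request logged as { ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }. A minimal sketch of the equivalent client call, assuming the standard HBase 2.x Admin API (the class name is illustrative, not from the test source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a FLUSH-type snapshot, matching the logged
      // "snapshot request for:{ ... type=FLUSH ttl=0 }" handled by MasterRpcServices.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}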
2024-12-02T23:14:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T23:14:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@441e94a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:35,558 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ef6b3f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:35,558 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,559 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59260, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:35,559 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c715c4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:35,560 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:35,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,561 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,562 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:14:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,562 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:14:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b38f28d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:35,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:35,563 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221c511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:35,564 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,565 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59268, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:35,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc4abcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:35,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:35,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,567 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:35,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,569 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,570 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:14:35,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:35,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,570 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:14:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:14:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:14:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T23:14:35,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T23:14:35,573 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:35,575 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:35,578 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:35,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742274_1450 (size=185) 2024-12-02T23:14:35,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742274_1450 (size=185) 2024-12-02T23:14:35,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742274_1450 (size=185) 2024-12-02T23:14:35,596 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:35,597 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08}] 2024-12-02T23:14:35,597 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,597 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T23:14:35,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-02T23:14:35,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-02T23:14:35,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:35,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:35,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for b34bb32e23f8345a23dcec225714af08: 2024-12-02T23:14:35,749 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for 4f2b7b3c8280b03935fda802a9eb2d2e: 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:35,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742275_1451 (size=76) 2024-12-02T23:14:35,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742276_1452 (size=76) 2024-12-02T23:14:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742275_1451 (size=76) 2024-12-02T23:14:35,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742276_1452 (size=76) 2024-12-02T23:14:35,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:35,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 
2024-12-02T23:14:35,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-02T23:14:35,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-02T23:14:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-02T23:14:35,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-02T23:14:35,758 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,758 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:35,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742275_1451 (size=76) 2024-12-02T23:14:35,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742276_1452 (size=76) 2024-12-02T23:14:35,758 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,760 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08 in 162 msec 2024-12-02T23:14:35,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-02T23:14:35,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e in 162 msec 2024-12-02T23:14:35,761 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:35,762 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:35,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:14:35,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:35,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:35,763 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:14:35,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742277_1453 (size=68) 2024-12-02T23:14:35,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742277_1453 (size=68) 2024-12-02T23:14:35,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742277_1453 (size=68) 2024-12-02T23:14:35,770 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:35,770 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:35,771 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742278_1454 (size=673) 2024-12-02T23:14:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742278_1454 (size=673) 2024-12-02T23:14:35,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742278_1454 (size=673) 2024-12-02T23:14:35,780 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:35,783 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:35,784 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:35,785 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:35,785 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T23:14:35,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 214 msec 2024-12-02T23:14:35,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T23:14:35,892 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T23:14:35,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:35,899 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:35,900 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,902 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-02T23:14:35,902 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 
2024-12-02T23:14:35,902 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:35,904 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,908 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,912 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T23:14:35,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181275914 (current time:1733181275914). 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e4283d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:35,915 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:35,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:35,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:35,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@617aa92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:35,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:35,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,917 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:35,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e2775c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:35,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:35,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,920 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c3a1eab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:35,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:35,921 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:35,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2a5d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:35,922 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,922 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:59288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:35,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52eb8b79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:35,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:35,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:35,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,925 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53072, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:35,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:35,926 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:35,927 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:14:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:35,927 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T23:14:35,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T23:14:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T23:14:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T23:14:35,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T23:14:35,930 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:35,930 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:35,932 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742279_1455 (size=180) 2024-12-02T23:14:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742279_1455 (size=180) 2024-12-02T23:14:35,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742279_1455 (size=180) 2024-12-02T23:14:35,938 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:35,938 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08}] 2024-12-02T23:14:35,939 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:35,939 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:36,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T23:14:36,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-02T23:14:36,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:36,092 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing 4f2b7b3c8280b03935fda802a9eb2d2e 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T23:14:36,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-02T23:14:36,092 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:36,093 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing b34bb32e23f8345a23dcec225714af08 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T23:14:36,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e is 71, key is 0ad2a0861704cc0c2433b67da2ae2327/cf:q/1733181275897/Put/seqid=0 2024-12-02T23:14:36,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 is 71, key is 13e76368a5eb2c198d42415e762dafe8/cf:q/1733181275899/Put/seqid=0 2024-12-02T23:14:36,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742280_1456 (size=5102) 2024-12-02T23:14:36,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742280_1456 (size=5102) 2024-12-02T23:14:36,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742280_1456 (size=5102) 2024-12-02T23:14:36,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742281_1457 (size=8172) 2024-12-02T23:14:36,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:36,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742281_1457 (size=8172) 2024-12-02T23:14:36,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742281_1457 (size=8172) 2024-12-02T23:14:36,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:36,149 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:36,149 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:36,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/.tmp/cf/4fd3e722c11c40a7b0bd1b7db63ecc72, store: [table=testtb-testEmptyExportFileSystemState family=cf region=b34bb32e23f8345a23dcec225714af08] 2024-12-02T23:14:36,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/.tmp/cf/c363be27276f4729a26953b9508088dc, store: [table=testtb-testEmptyExportFileSystemState family=cf region=4f2b7b3c8280b03935fda802a9eb2d2e] 2024-12-02T23:14:36,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/.tmp/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 is 214, key is 10a7fea04677cf103fb12e0ff9ee63c00/cf:q/1733181275899/Put/seqid=0 
2024-12-02T23:14:36,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/.tmp/cf/c363be27276f4729a26953b9508088dc is 214, key is 013f406e56727098245dc6d92f27499bd/cf:q/1733181275897/Put/seqid=0 2024-12-02T23:14:36,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742282_1458 (size=15239) 2024-12-02T23:14:36,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742282_1458 (size=15239) 2024-12-02T23:14:36,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742283_1459 (size=5938) 2024-12-02T23:14:36,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742283_1459 (size=5938) 2024-12-02T23:14:36,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742282_1458 (size=15239) 2024-12-02T23:14:36,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742283_1459 (size=5938) 2024-12-02T23:14:36,157 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/.tmp/cf/c363be27276f4729a26953b9508088dc 2024-12-02T23:14:36,160 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/.tmp/cf/c363be27276f4729a26953b9508088dc as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc 2024-12-02T23:14:36,164 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc, entries=3, sequenceid=6, filesize=5.8 K 2024-12-02T23:14:36,165 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 4f2b7b3c8280b03935fda802a9eb2d2e in 74ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:36,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 
'testtb-testEmptyExportFileSystemState' 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for 4f2b7b3c8280b03935fda802a9eb2d2e: 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc] hfiles 2024-12-02T23:14:36,166 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742284_1460 (size=115) 2024-12-02T23:14:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742284_1460 (size=115) 2024-12-02T23:14:36,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742284_1460 (size=115) 2024-12-02T23:14:36,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 
2024-12-02T23:14:36,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-02T23:14:36,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-02T23:14:36,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:36,172 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:36,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e in 235 msec 2024-12-02T23:14:36,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T23:14:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T23:14:36,556 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/.tmp/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 2024-12-02T23:14:36,562 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/.tmp/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 2024-12-02T23:14:36,567 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72, entries=47, sequenceid=6, filesize=14.9 K 2024-12-02T23:14:36,568 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for b34bb32e23f8345a23dcec225714af08 in 475ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for b34bb32e23f8345a23dcec225714af08: 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72] hfiles 2024-12-02T23:14:36,568 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742285_1461 (size=115) 2024-12-02T23:14:36,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742285_1461 (size=115) 2024-12-02T23:14:36,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742285_1461 (size=115) 2024-12-02T23:14:36,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 
2024-12-02T23:14:36,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-02T23:14:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-02T23:14:36,587 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:36,587 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:36,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-02T23:14:36,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b34bb32e23f8345a23dcec225714af08 in 650 msec 2024-12-02T23:14:36,591 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:36,591 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:36,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:14:36,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:36,592 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:36,593 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e] hfiles 2024-12-02T23:14:36,593 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:36,593 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:36,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742286_1462 (size=299) 2024-12-02T23:14:36,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742286_1462 (size=299) 2024-12-02T23:14:36,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742286_1462 (size=299) 2024-12-02T23:14:36,600 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:36,600 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,601 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742287_1463 (size=983) 2024-12-02T23:14:36,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742287_1463 (size=983) 
2024-12-02T23:14:36,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742287_1463 (size=983) 2024-12-02T23:14:36,610 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:36,615 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:36,615 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:36,616 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:36,616 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T23:14:36,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 688 msec 2024-12-02T23:14:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T23:14:37,062 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T23:14:37,062 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062 2024-12-02T23:14:37,062 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:37,090 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, 
inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:37,090 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:37,092 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:14:37,095 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:37,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742288_1464 (size=673) 2024-12-02T23:14:37,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742288_1464 (size=673) 2024-12-02T23:14:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742289_1465 (size=185) 2024-12-02T23:14:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742289_1465 (size=185) 2024-12-02T23:14:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742288_1464 (size=673) 2024-12-02T23:14:37,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742289_1465 (size=185) 2024-12-02T23:14:37,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:37,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:37,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-13534942348372757688.jar 2024-12-02T23:14:38,318 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,318 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-3289884875469599061.jar 2024-12-02T23:14:38,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:38,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:14:38,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:14:38,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:14:38,389 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:14:38,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:14:38,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:14:38,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:14:38,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:14:38,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:14:38,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:14:38,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:14:38,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:38,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:38,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:38,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
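The TableMapReduceUtil entries around this point record the dependency jars being staged for the ExportSnapshot MapReduce job (the listing continues just below). A minimal sketch of how that export can be launched programmatically, assuming the ExportSnapshot tool options documented in the HBase reference guide (-snapshot / -copy-to); the snapshot name and destination path are copied from the log and would be cluster-specific in practice:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ExportSnapshot is a Hadoop Tool; it copies the snapshot manifest and any
        // referenced hfiles to the target filesystem, which is what the surrounding
        // log records for emptySnaptb0-testEmptyExportFileSystemState.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to",
            "hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062"
        });
        System.exit(rc);
      }
    }
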
2024-12-02T23:14:38,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:38,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:38,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:38,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742290_1466 (size=131440) 2024-12-02T23:14:38,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742290_1466 (size=131440) 2024-12-02T23:14:38,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742290_1466 (size=131440) 2024-12-02T23:14:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742291_1467 (size=4188619) 2024-12-02T23:14:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742291_1467 (size=4188619) 2024-12-02T23:14:38,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742291_1467 (size=4188619) 2024-12-02T23:14:38,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742292_1468 (size=1323991) 2024-12-02T23:14:38,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742292_1468 (size=1323991) 2024-12-02T23:14:38,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742292_1468 (size=1323991) 2024-12-02T23:14:38,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742293_1469 (size=903926) 2024-12-02T23:14:38,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742293_1469 (size=903926) 2024-12-02T23:14:38,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742293_1469 (size=903926) 2024-12-02T23:14:38,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742294_1470 (size=8360083) 2024-12-02T23:14:38,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742294_1470 (size=8360083) 2024-12-02T23:14:38,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37623 is added to blk_1073742294_1470 (size=8360083) 2024-12-02T23:14:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742295_1471 (size=1877034) 2024-12-02T23:14:38,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742295_1471 (size=1877034) 2024-12-02T23:14:38,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742295_1471 (size=1877034) 2024-12-02T23:14:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742296_1472 (size=77835) 2024-12-02T23:14:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742296_1472 (size=77835) 2024-12-02T23:14:38,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742296_1472 (size=77835) 2024-12-02T23:14:38,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742297_1473 (size=30949) 2024-12-02T23:14:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742297_1473 (size=30949) 2024-12-02T23:14:38,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742297_1473 (size=30949) 2024-12-02T23:14:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742298_1474 (size=1597213) 2024-12-02T23:14:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742298_1474 (size=1597213) 2024-12-02T23:14:38,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742298_1474 (size=1597213) 2024-12-02T23:14:38,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742299_1475 (size=4695811) 2024-12-02T23:14:38,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742299_1475 (size=4695811) 2024-12-02T23:14:38,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742299_1475 (size=4695811) 2024-12-02T23:14:38,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742300_1476 (size=232957) 2024-12-02T23:14:38,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742300_1476 (size=232957) 2024-12-02T23:14:38,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742300_1476 (size=232957) 2024-12-02T23:14:38,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742301_1477 (size=127628) 2024-12-02T23:14:38,668 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742301_1477 (size=127628) 2024-12-02T23:14:38,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742301_1477 (size=127628) 2024-12-02T23:14:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742302_1478 (size=20406) 2024-12-02T23:14:38,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742302_1478 (size=20406) 2024-12-02T23:14:38,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742302_1478 (size=20406) 2024-12-02T23:14:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742303_1479 (size=5175431) 2024-12-02T23:14:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742303_1479 (size=5175431) 2024-12-02T23:14:38,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742303_1479 (size=5175431) 2024-12-02T23:14:38,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742304_1480 (size=6424743) 2024-12-02T23:14:38,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742304_1480 (size=6424743) 2024-12-02T23:14:38,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742304_1480 (size=6424743) 2024-12-02T23:14:38,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742305_1481 (size=217634) 2024-12-02T23:14:38,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742305_1481 (size=217634) 2024-12-02T23:14:38,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742305_1481 (size=217634) 2024-12-02T23:14:38,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T23:14:38,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T23:14:38,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742306_1482 (size=1832290) 2024-12-02T23:14:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742307_1483 (size=322274) 2024-12-02T23:14:38,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742307_1483 (size=322274) 2024-12-02T23:14:38,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742307_1483 (size=322274) 2024-12-02T23:14:38,768 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742308_1484 (size=503880) 2024-12-02T23:14:38,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742308_1484 (size=503880) 2024-12-02T23:14:38,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742308_1484 (size=503880) 2024-12-02T23:14:38,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742309_1485 (size=443172) 2024-12-02T23:14:38,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742309_1485 (size=443172) 2024-12-02T23:14:38,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742309_1485 (size=443172) 2024-12-02T23:14:38,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742310_1486 (size=29229) 2024-12-02T23:14:38,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742310_1486 (size=29229) 2024-12-02T23:14:38,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742310_1486 (size=29229) 2024-12-02T23:14:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742311_1487 (size=24096) 2024-12-02T23:14:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742311_1487 (size=24096) 2024-12-02T23:14:38,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742311_1487 (size=24096) 2024-12-02T23:14:38,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742312_1488 (size=111872) 2024-12-02T23:14:38,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742312_1488 (size=111872) 2024-12-02T23:14:38,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742312_1488 (size=111872) 2024-12-02T23:14:38,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742313_1489 (size=45609) 2024-12-02T23:14:38,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742313_1489 (size=45609) 2024-12-02T23:14:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742313_1489 (size=45609) 2024-12-02T23:14:38,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742314_1490 (size=136454) 2024-12-02T23:14:38,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742314_1490 (size=136454) 2024-12-02T23:14:38,809 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742314_1490 (size=136454) 2024-12-02T23:14:38,810 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T23:14:38,812 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-02T23:14:38,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742315_1491 (size=7) 2024-12-02T23:14:38,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742315_1491 (size=7) 2024-12-02T23:14:38,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742315_1491 (size=7) 2024-12-02T23:14:38,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742316_1492 (size=10) 2024-12-02T23:14:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742316_1492 (size=10) 2024-12-02T23:14:38,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742316_1492 (size=10) 2024-12-02T23:14:38,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742317_1493 (size=303901) 2024-12-02T23:14:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742317_1493 (size=303901) 2024-12-02T23:14:38,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742317_1493 (size=303901) 2024-12-02T23:14:38,883 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:14:38,883 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:14:39,419 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:39,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0008_000001 (auth:SIMPLE) from 127.0.0.1:50878 2024-12-02T23:14:45,205 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T23:14:45,205 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T23:14:45,877 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0008_000001 (auth:SIMPLE) from 127.0.0.1:51342 2024-12-02T23:14:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742318_1494 (size=349575) 2024-12-02T23:14:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742318_1494 (size=349575) 2024-12-02T23:14:46,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742318_1494 (size=349575) 2024-12-02T23:14:47,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742319_1495 (size=8568) 2024-12-02T23:14:47,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742319_1495 (size=8568) 2024-12-02T23:14:47,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742319_1495 (size=8568) 2024-12-02T23:14:47,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742320_1496 (size=460) 2024-12-02T23:14:47,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742320_1496 (size=460) 2024-12-02T23:14:47,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742320_1496 (size=460) 2024-12-02T23:14:47,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742321_1497 (size=8568) 2024-12-02T23:14:47,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742321_1497 (size=8568) 2024-12-02T23:14:47,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742321_1497 (size=8568) 2024-12-02T23:14:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742322_1498 (size=349575) 2024-12-02T23:14:47,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742322_1498 
(size=349575) 2024-12-02T23:14:47,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742322_1498 (size=349575) 2024-12-02T23:14:49,055 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:14:49,062 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T23:14:49,081 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:49,081 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:14:49,082 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:14:49,082 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:49,083 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T23:14:49,083 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T23:14:49,083 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:49,083 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T23:14:49,083 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181277062/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T23:14:49,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T23:14:49,096 DEBUG 
[PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181289095"}]},"ts":"1733181289095"} 2024-12-02T23:14:49,098 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-02T23:14:49,098 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-02T23:14:49,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-02T23:14:49,101 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, UNASSIGN}] 2024-12-02T23:14:49,102 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, UNASSIGN 2024-12-02T23:14:49,102 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, UNASSIGN 2024-12-02T23:14:49,103 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=4f2b7b3c8280b03935fda802a9eb2d2e, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:49,103 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=b34bb32e23f8345a23dcec225714af08, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:49,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, UNASSIGN because future has completed 2024-12-02T23:14:49,105 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:49,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:14:49,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, UNASSIGN because future has completed 2024-12-02T23:14:49,107 DEBUG 
[PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:14:49,107 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:49,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T23:14:49,258 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:49,259 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:49,259 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 4f2b7b3c8280b03935fda802a9eb2d2e, disabling compactions & flushes 2024-12-02T23:14:49,259 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:49,259 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:49,259 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. after waiting 0 ms 2024-12-02T23:14:49,259 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 2024-12-02T23:14:49,260 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:49,260 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:14:49,260 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing b34bb32e23f8345a23dcec225714af08, disabling compactions & flushes 2024-12-02T23:14:49,260 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:49,260 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 
2024-12-02T23:14:49,260 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. after waiting 0 ms 2024-12-02T23:14:49,260 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:49,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:49,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:14:49,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:49,268 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:14:49,268 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08. 2024-12-02T23:14:49,268 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e. 
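The HRegion close entries above (pids 210 and 211) are the region-server side of the DisableTableProcedure stored as pid=206. A minimal sketch of the corresponding client call, assuming an open Connection as in the earlier sketch:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class DisableSketch {
      // 'conn' is assumed to be an open Connection to the test cluster.
      static void disableTestTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          // Blocks until the DisableTableProcedure finishes: regions are unassigned,
          // closed on the region servers, and the table state becomes DISABLED in hbase:meta.
          admin.disableTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
        }
      }
    }
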
2024-12-02T23:14:49,269 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for b34bb32e23f8345a23dcec225714af08: Waiting for close lock at 1733181289260Running coprocessor pre-close hooks at 1733181289260Disabling compacts and flushes for region at 1733181289260Disabling writes for close at 1733181289260Writing region close event to WAL at 1733181289261 (+1 ms)Running coprocessor post-close hooks at 1733181289268 (+7 ms)Closed at 1733181289268 2024-12-02T23:14:49,269 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 4f2b7b3c8280b03935fda802a9eb2d2e: Waiting for close lock at 1733181289259Running coprocessor pre-close hooks at 1733181289259Disabling compacts and flushes for region at 1733181289259Disabling writes for close at 1733181289259Writing region close event to WAL at 1733181289259Running coprocessor post-close hooks at 1733181289268 (+9 ms)Closed at 1733181289268 2024-12-02T23:14:49,270 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:49,271 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=b34bb32e23f8345a23dcec225714af08, regionState=CLOSED 2024-12-02T23:14:49,271 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:49,274 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=4f2b7b3c8280b03935fda802a9eb2d2e, regionState=CLOSED 2024-12-02T23:14:49,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:49,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:14:49,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-12-02T23:14:49,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure b34bb32e23f8345a23dcec225714af08, server=9e5ab3b623d7,41311,1733180945828 in 170 msec 2024-12-02T23:14:49,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-12-02T23:14:49,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure 4f2b7b3c8280b03935fda802a9eb2d2e, server=9e5ab3b623d7,38049,1733180945779 in 173 msec 2024-12-02T23:14:49,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b34bb32e23f8345a23dcec225714af08, UNASSIGN in 179 msec 2024-12-02T23:14:49,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=208, resume processing ppid=207 2024-12-02T23:14:49,283 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f2b7b3c8280b03935fda802a9eb2d2e, UNASSIGN in 180 msec 2024-12-02T23:14:49,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-02T23:14:49,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 185 msec 2024-12-02T23:14:49,288 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181289288"}]},"ts":"1733181289288"} 2024-12-02T23:14:49,290 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T23:14:49,290 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-02T23:14:49,292 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 200 msec 2024-12-02T23:14:49,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T23:14:49,413 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T23:14:49,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,416 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,417 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,419 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,422 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:49,422 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T23:14:49,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T23:14:49,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T23:14:49,425 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/recovered.edits] 2024-12-02T23:14:49,425 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/recovered.edits] 2024-12-02T23:14:49,426 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T23:14:49,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:49,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,430 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:49,430 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:49,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-02T23:14:49,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:49,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:49,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:49,434 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/cf/c363be27276f4729a26953b9508088dc 2024-12-02T23:14:49,434 DEBUG [HFileArchiver-17 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/cf/4fd3e722c11c40a7b0bd1b7db63ecc72 2024-12-02T23:14:49,437 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e/recovered.edits/9.seqid 2024-12-02T23:14:49,438 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08/recovered.edits/9.seqid 2024-12-02T23:14:49,438 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:49,438 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testEmptyExportFileSystemState/4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:49,439 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-02T23:14:49,439 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-02T23:14:49,440 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-02T23:14:49,444 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b2024120260b78134d3fe4d94ad0f82bccf9a8c09_b34bb32e23f8345a23dcec225714af08 2024-12-02T23:14:49,445 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241202c41e5f3356fe4ef9954fbb5a24fdb588_4f2b7b3c8280b03935fda802a9eb2d2e 2024-12-02T23:14:49,446 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-02T23:14:49,448 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,451 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-02T23:14:49,453 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-02T23:14:49,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,454 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-02T23:14:49,454 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181289454"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:49,454 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181289454"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:49,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:14:49,457 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4f2b7b3c8280b03935fda802a9eb2d2e, NAME => 'testtb-testEmptyExportFileSystemState,,1733181274393.4f2b7b3c8280b03935fda802a9eb2d2e.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b34bb32e23f8345a23dcec225714af08, NAME => 'testtb-testEmptyExportFileSystemState,1,1733181274393.b34bb32e23f8345a23dcec225714af08.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:14:49,457 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
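The DeleteTableProcedure trace above (archive the region files, remove the rows from hbase:meta, drop the descriptor) is driven by ordinary Admin calls on the client side. The sketch below is illustrative only — the class name and control flow are assumptions, not the test's actual code — but these are the standard HBase client calls that trigger the procedure and the snapshot cleanup whose completion is logged just after this point.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: drop the test table and its snapshots via the Admin API.
public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // a table must be disabled before it can be deleted
        }
        admin.deleteTable(table);      // the master runs the DeleteTableProcedure seen in the log
      }
      // Snapshot cleanup, matching the "delete name: ..." requests logged below.
      admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
    }
  }
}
```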
2024-12-02T23:14:49,457 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181289457"}]},"ts":"9223372036854775807"} 2024-12-02T23:14:49,459 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-02T23:14:49,460 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,461 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 46 msec 2024-12-02T23:14:49,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-02T23:14:49,543 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-02T23:14:49,543 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T23:14:49,549 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T23:14:49,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:49,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T23:14:49,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-02T23:14:49,598 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=813 (was 802) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:39223 from appattempt_1733180955226_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:34047 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:59030 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 15155) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_673426571_1 at /127.0.0.1:45402 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:33466 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39223 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7358 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34047 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:45422 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=790 (was 777) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=968 (was 1001), ProcessCount=19 (was 16) - ProcessCount LEAK? 
-, AvailableMemoryMB=1031 (was 2115) 2024-12-02T23:14:49,598 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-02T23:14:49,623 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=813, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=968, ProcessCount=19, AvailableMemoryMB=1031 2024-12-02T23:14:49,624 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-12-02T23:14:49,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:14:49,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:14:49,628 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:14:49,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-02T23:14:49,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T23:14:49,630 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:14:49,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742323_1499 (size=440) 2024-12-02T23:14:49,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742323_1499 (size=440) 2024-12-02T23:14:49,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742323_1499 (size=440) 2024-12-02T23:14:49,648 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 688a3c000ee460cf14e89bc5b111e260, NAME => 'testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:49,648 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 407a27bcc253c4f32756dbd0df885650, NAME => 'testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742324_1500 (size=65) 2024-12-02T23:14:49,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742324_1500 (size=65) 2024-12-02T23:14:49,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742324_1500 (size=65) 2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 407a27bcc253c4f32756dbd0df885650, disabling compactions & flushes 2024-12-02T23:14:49,666 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. after waiting 0 ms 2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:49,666 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 
2024-12-02T23:14:49,666 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 407a27bcc253c4f32756dbd0df885650: Waiting for close lock at 1733181289666Disabling compacts and flushes for region at 1733181289666Disabling writes for close at 1733181289666Writing region close event to WAL at 1733181289666Closed at 1733181289666 2024-12-02T23:14:49,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742325_1501 (size=65) 2024-12-02T23:14:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742325_1501 (size=65) 2024-12-02T23:14:49,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742325_1501 (size=65) 2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 688a3c000ee460cf14e89bc5b111e260, disabling compactions & flushes 2024-12-02T23:14:49,678 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. after waiting 0 ms 2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:49,678 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
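For reference, the table descriptor logged above (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', a single 'cf' family, two regions split at '1') corresponds to a client-side createTable call roughly like the one below. This is a minimal sketch assuming the standard HBase 2.x builder API; the class name is invented and the test's real setup code may differ.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: create a MOB-enabled table equivalent to the descriptor in the log.
public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)    // IS_MOB => 'true'
            .setMobThreshold(0L)    // MOB_THRESHOLD => '0': every value is stored as a MOB
            .setMaxVersions(1)      // VERSIONS => '1'
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // One split key at '1' yields the two regions ('' -> '1', '1' -> '') created above.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```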
2024-12-02T23:14:49,678 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 688a3c000ee460cf14e89bc5b111e260: Waiting for close lock at 1733181289678Disabling compacts and flushes for region at 1733181289678Disabling writes for close at 1733181289678Writing region close event to WAL at 1733181289678Closed at 1733181289678 2024-12-02T23:14:49,679 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:14:49,680 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181289679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181289679"}]},"ts":"1733181289679"} 2024-12-02T23:14:49,680 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733181289679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181289679"}]},"ts":"1733181289679"} 2024-12-02T23:14:49,683 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:14:49,684 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:14:49,684 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181289684"}]},"ts":"1733181289684"} 2024-12-02T23:14:49,686 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-02T23:14:49,686 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:14:49,687 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:14:49,687 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:14:49,687 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:14:49,687 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:14:49,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, ASSIGN}] 2024-12-02T23:14:49,688 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, ASSIGN 2024-12-02T23:14:49,688 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, ASSIGN 2024-12-02T23:14:49,689 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,38049,1733180945779; forceNewPlan=false, retain=false 2024-12-02T23:14:49,689 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:14:49,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T23:14:49,840 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:14:49,840 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=407a27bcc253c4f32756dbd0df885650, regionState=OPENING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:49,840 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=688a3c000ee460cf14e89bc5b111e260, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:49,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, ASSIGN because future has completed 2024-12-02T23:14:49,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, ASSIGN because future has completed 2024-12-02T23:14:49,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:14:49,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:14:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T23:14:49,999 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:49,999 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 407a27bcc253c4f32756dbd0df885650, NAME => 'testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:14:50,000 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. service=AccessControlService 2024-12-02T23:14:50,000 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:14:50,000 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,000 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:50,000 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,000 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,001 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 688a3c000ee460cf14e89bc5b111e260, NAME => 'testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. service=AccessControlService 2024-12-02T23:14:50,001 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,001 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,002 INFO [StoreOpener-407a27bcc253c4f32756dbd0df885650-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,003 INFO [StoreOpener-688a3c000ee460cf14e89bc5b111e260-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,003 INFO [StoreOpener-407a27bcc253c4f32756dbd0df885650-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 407a27bcc253c4f32756dbd0df885650 columnFamilyName cf 2024-12-02T23:14:50,004 INFO [StoreOpener-688a3c000ee460cf14e89bc5b111e260-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 688a3c000ee460cf14e89bc5b111e260 columnFamilyName cf 2024-12-02T23:14:50,004 DEBUG [StoreOpener-407a27bcc253c4f32756dbd0df885650-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:50,004 DEBUG [StoreOpener-688a3c000ee460cf14e89bc5b111e260-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:50,004 INFO [StoreOpener-407a27bcc253c4f32756dbd0df885650-1 {}] regionserver.HStore(327): Store=407a27bcc253c4f32756dbd0df885650/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:50,005 INFO [StoreOpener-688a3c000ee460cf14e89bc5b111e260-1 {}] regionserver.HStore(327): Store=688a3c000ee460cf14e89bc5b111e260/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:14:50,005 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,005 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,005 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,005 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,006 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,007 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 
{event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,008 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,008 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:50,009 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 407a27bcc253c4f32756dbd0df885650; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68676723, jitterRate=0.023362919688224792}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:50,009 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,009 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:14:50,009 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 407a27bcc253c4f32756dbd0df885650: Running coprocessor pre-open hook at 1733181290000Writing region info on filesystem at 1733181290000Initializing all the Stores at 1733181290001 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181290001Cleaning up temporary data from old regions at 1733181290006 (+5 ms)Running coprocessor post-open hooks at 1733181290009 (+3 ms)Region opened successfully at 1733181290009 2024-12-02T23:14:50,010 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 688a3c000ee460cf14e89bc5b111e260; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73855391, jitterRate=0.10053108632564545}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:14:50,010 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,010 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 688a3c000ee460cf14e89bc5b111e260: Running coprocessor pre-open hook at 1733181290002Writing region info on filesystem 
at 1733181290002Initializing all the Stores at 1733181290002Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181290002Cleaning up temporary data from old regions at 1733181290006 (+4 ms)Running coprocessor post-open hooks at 1733181290010 (+4 ms)Region opened successfully at 1733181290010 2024-12-02T23:14:50,010 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650., pid=216, masterSystemTime=1733181289996 2024-12-02T23:14:50,010 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260., pid=217, masterSystemTime=1733181289998 2024-12-02T23:14:50,012 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:50,012 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:50,013 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=407a27bcc253c4f32756dbd0df885650, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:14:50,013 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:50,013 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:50,014 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=688a3c000ee460cf14e89bc5b111e260, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:14:50,014 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=9e5ab3b623d7,41311,1733180945828, table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
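Once both TransitRegionStateProcedures finish, the two regions are OPEN on the region servers named in the log. A client can confirm the assignment with a RegionLocator; the helper below is a hypothetical convenience method under that assumption, not part of the test, and it expects an already-open Connection.

```java
import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

// Hypothetical helper: wait for the table's regions to be assigned and print their locations.
public final class RegionCheck {
  public static void waitForRegions(Connection conn, TableName table) throws Exception {
    try (Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      while (!admin.isTableAvailable(table)) {
        Thread.sleep(100);   // poll until the assignment procedures above have completed
      }
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        // e.g. "testtb-testExportWithChecksum,1,... -> 9e5ab3b623d7,38049,1733180945779"
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
```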
2024-12-02T23:14:50,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:14:50,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:14:50,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-02T23:14:50,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779 in 172 msec 2024-12-02T23:14:50,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-02T23:14:50,018 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828 in 172 msec 2024-12-02T23:14:50,019 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, ASSIGN in 330 msec 2024-12-02T23:14:50,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-02T23:14:50,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, ASSIGN in 331 msec 2024-12-02T23:14:50,020 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:14:50,021 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181290020"}]},"ts":"1733181290020"} 2024-12-02T23:14:50,022 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-02T23:14:50,023 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:14:50,023 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-02T23:14:50,025 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T23:14:50,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:50,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:50,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:50,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:14:50,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,029 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:14:50,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:14:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:14:50,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:14:50,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,032 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data 
PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,033 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T23:14:50,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 406 msec 2024-12-02T23:14:50,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T23:14:50,253 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T23:14:50,253 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:50,257 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-02T23:14:50,257 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:50,258 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:50,261 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:50,268 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:50,273 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:50,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T23:14:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181290276 (current time:1733181290276). 
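
The entries above cover the tail of CreateTableProcedure pid=213 for testtb-testExportWithChecksum (ACL row written for jenkins, table marked ENABLED in hbase:meta) followed by the first snapshot request, { ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }. This is not the test's own code; it is a minimal client-side sketch of calls that would produce this sequence, assuming a reachable cluster. The split key "1" and the family name "cf" are taken from the region names and family references in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAndSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // One 'cf' family; split key "1" yields the two regions seen above
          // (start keys "" and "1").
          admin.createTable(
              TableDescriptorBuilder.newBuilder(tn)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              new byte[][] { Bytes.toBytes("1") });
          // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above.
          admin.snapshot("emptySnaptb0-testExportWithChecksum", tn, SnapshotType.FLUSH);
        }
      }
    }
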
2024-12-02T23:14:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T23:14:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ed72ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:50,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:50,292 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@afa9efa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:50,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:50,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,294 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:50,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a7ecf5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:50,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:50,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:50,297 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:50,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:14:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,298 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:14:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20e7c619, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:50,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:50,307 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c715fbb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:50,308 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,309 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:50,310 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ecc7395, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:50,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:50,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:50,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:50,313 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57352, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
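
The ClusterIdFetcher, ConnectionRegistryRpcStubHolder and "fetched meta region location" entries above trace the bootstrap an HBase client performs before its first data RPC: ask the connection registry for the cluster id, build stubs against the master, then locate hbase:meta. A hedged sketch of triggering that same bootstrap from an external client, assuming the ZooKeeper quorum values shown earlier in this log (127.0.0.1:57019); a real client would normally pick these up from hbase-site.xml.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum/port taken from the ZKWatcher lines earlier in this log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "57019");
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // The cluster-id fetch and meta location lookup traced above happen
          // lazily behind this call and the first table operation.
          System.out.println("connection open: " + !conn.isClosed());
        }
      }
    }
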
2024-12-02T23:14:50,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:50,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:50,316 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52344, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:50,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 2024-12-02T23:14:50,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:50,317 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:50,317 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:50,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T23:14:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:14:50,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T23:14:50,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T23:14:50,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T23:14:50,327 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:50,329 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:50,332 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:50,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742326_1502 (size=161) 2024-12-02T23:14:50,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742326_1502 (size=161) 2024-12-02T23:14:50,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742326_1502 (size=161) 2024-12-02T23:14:50,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T23:14:50,523 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-02T23:14:50,642 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T23:14:50,780 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:50,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650}] 2024-12-02T23:14:50,781 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,781 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-02T23:14:50,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 407a27bcc253c4f32756dbd0df885650: 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 688a3c000ee460cf14e89bc5b111e260: 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. for emptySnaptb0-testExportWithChecksum completed. 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. for emptySnaptb0-testExportWithChecksum completed. 
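
The repeated "Checking to see if procedure is done pid=218" entries are the master answering the client's polling while SnapshotProcedure 218 walks through SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO and SNAPSHOT_SNAPSHOT_ONLINE_REGIONS. A sketch of that submit-then-poll pattern, assuming an already open Admin handle; the helper name snapshotAndWait is illustrative, not from the source.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotPollSketch {
      static void snapshotAndWait(Admin admin, String name, TableName table) throws Exception {
        SnapshotDescription desc = new SnapshotDescription(name, table, SnapshotType.FLUSH);
        admin.snapshotAsync(desc);                 // submits the SnapshotProcedure
        while (!admin.isSnapshotFinished(desc)) {  // poll the master until it reports done
          Thread.sleep(200);
        }
      }
    }
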
2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:50,933 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:14:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742327_1503 (size=68) 2024-12-02T23:14:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742327_1503 (size=68) 2024-12-02T23:14:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742327_1503 (size=68) 2024-12-02T23:14:50,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 
2024-12-02T23:14:50,943 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-02T23:14:50,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-02T23:14:50,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,944 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:50,946 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 in 165 msec 2024-12-02T23:14:50,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742328_1504 (size=68) 2024-12-02T23:14:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742328_1504 (size=68) 2024-12-02T23:14:50,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742328_1504 (size=68) 2024-12-02T23:14:50,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
2024-12-02T23:14:50,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-02T23:14:50,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-02T23:14:50,952 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,952 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:50,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T23:14:50,954 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=218 2024-12-02T23:14:50,954 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 in 173 msec 2024-12-02T23:14:50,954 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:50,955 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:50,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
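
The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool entries above only appear because the table's cf family stores MOB data; the later flush writes under mobdir/ in this log confirm that. A sketch of how such a family can be declared with the 2.x+ builder API; the threshold value is illustrative, not taken from this log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      static TableDescriptor mobTable(TableName tn) {
        // MOB keeps large cell values in separate files under mobdir/, which is
        // why the snapshot above has a dedicated mob-region step.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(102400L)   // illustrative threshold, not from the log
            .build();
        return TableDescriptorBuilder.newBuilder(tn).setColumnFamily(cf).build();
      }
    }
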
2024-12-02T23:14:50,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:50,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:50,956 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:14:50,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742329_1505 (size=60) 2024-12-02T23:14:50,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742329_1505 (size=60) 2024-12-02T23:14:50,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742329_1505 (size=60) 2024-12-02T23:14:50,969 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:50,969 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-02T23:14:50,970 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-02T23:14:50,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742330_1506 (size=641) 2024-12-02T23:14:50,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742330_1506 (size=641) 2024-12-02T23:14:50,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742330_1506 (size=641) 2024-12-02T23:14:50,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:50,991 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:50,991 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-02T23:14:50,992 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:50,992 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T23:14:50,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 674 msec 2024-12-02T23:14:51,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T23:14:51,462 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T23:14:51,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:51,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38049 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:14:51,469 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:51,471 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-02T23:14:51,471 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
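
The two "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are what a region server logs when a mutation arrives with durability SKIP_WAL. A sketch of a Put that would trigger that warning, assuming an open Connection; the row and value are placeholders, while the cf:q column matches the keys seen in the later flush output.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      static void putWithoutWal(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);  // triggers the "WAL disabled" warning
          table.put(put);
        }
      }
    }
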
2024-12-02T23:14:51,471 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:14:51,473 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:51,477 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:51,481 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T23:14:51,483 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181291484 (current time:1733181291484). 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1c7200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:51,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:51,485 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:51,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:51,485 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:51,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68a0f794, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T23:14:51,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:51,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:51,486 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,486 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:51,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e62a719, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:51,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:51,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:51,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:51,489 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57362, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:51,489 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 
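
The request above ({ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) creates the snapshot that a checksum-verified export would later operate on, which is presumably what the surrounding testExportWithChecksum test exercises. A hedged sketch of driving the ExportSnapshot tool programmatically; the destination URI is a placeholder, not something this log specifies.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testExportWithChecksum -copy-to <dest>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/hbase-snapshot-export"   // placeholder destination
        });
        System.exit(rc);
      }
    }
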
2024-12-02T23:14:51,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,490 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed4ab1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:14:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:14:51,491 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:14:51,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:14:51,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:14:51,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c2dd85f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:51,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:14:51,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:14:51,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,492 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:38320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:14:51,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22702f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:14:51,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:14:51,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:14:51,494 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:51,494 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57366, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:51,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:14:51,496 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:14:51,496 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52346, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:14:51,497 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 
2024-12-02T23:14:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:14:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:14:51,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T23:14:51,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
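
The "Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA]" entry just above, together with writeAclToSnapshotDescription in the traced call stack, shows the table-level grant (READ, WRITE, EXEC, CREATE, ADMIN for user jenkins) being copied into the snapshot description. A sketch of issuing such a grant, assuming an open Connection and that the AccessController coprocessor is installed, as it is on this test cluster (hence the hbase:acl region).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      static void grantAll(Connection conn) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportWithChecksum"),
            "jenkins", null, null,   // whole table: no family/qualifier restriction
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);  // R, W, X, C, A
      }
    }
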
2024-12-02T23:14:51,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T23:14:51,501 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:14:51,501 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:14:51,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T23:14:51,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T23:14:51,502 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:14:51,504 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:14:51,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742331_1507 (size=156) 2024-12-02T23:14:51,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742331_1507 (size=156) 2024-12-02T23:14:51,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742331_1507 (size=156) 2024-12-02T23:14:51,511 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:14:51,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650}] 2024-12-02T23:14:51,512 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:51,512 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:51,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T23:14:51,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-02T23:14:51,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=38049 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-02T23:14:51,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:14:51,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:14:51,664 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 688a3c000ee460cf14e89bc5b111e260 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T23:14:51,664 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 407a27bcc253c4f32756dbd0df885650 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T23:14:51,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 is 71, key is 1100ef01232444129b2b36f86a6557c6/cf:q/1733181291468/Put/seqid=0 2024-12-02T23:14:51,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 is 71, key is 0b1b92587b82cbaee9b3a742a8efb0fc/cf:q/1733181291467/Put/seqid=0 2024-12-02T23:14:51,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742332_1508 (size=8171) 2024-12-02T23:14:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742332_1508 (size=8171) 2024-12-02T23:14:51,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742332_1508 (size=8171) 2024-12-02T23:14:51,691 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:51,692 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742333_1509 (size=5102) 2024-12-02T23:14:51,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742333_1509 (size=5102) 2024-12-02T23:14:51,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742333_1509 (size=5102) 2024-12-02T23:14:51,694 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:51,697 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:51,705 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:51,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/.tmp/cf/4c38af9624654f17b889450939e09497, store: [table=testtb-testExportWithChecksum family=cf region=688a3c000ee460cf14e89bc5b111e260] 2024-12-02T23:14:51,707 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/.tmp/cf/4c38af9624654f17b889450939e09497 is 206, key is 075ba7e29638cedc5e57e0d6831f05a8a/cf:q/1733181291467/Put/seqid=0 2024-12-02T23:14:51,709 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/.tmp/cf/bd4dd6f316514682bc2d902115305ef0, store: [table=testtb-testExportWithChecksum family=cf region=407a27bcc253c4f32756dbd0df885650] 2024-12-02T23:14:51,709 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/.tmp/cf/bd4dd6f316514682bc2d902115305ef0 is 206, key is 145350851b71bdbaf8277ea11d1434592/cf:q/1733181291468/Put/seqid=0 2024-12-02T23:14:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742334_1510 (size=5906) 2024-12-02T23:14:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742334_1510 (size=5906) 2024-12-02T23:14:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742335_1511 (size=14853) 2024-12-02T23:14:51,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742334_1510 (size=5906) 2024-12-02T23:14:51,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742335_1511 (size=14853) 2024-12-02T23:14:51,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742335_1511 (size=14853) 2024-12-02T23:14:51,733 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/.tmp/cf/4c38af9624654f17b889450939e09497 2024-12-02T23:14:51,733 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/.tmp/cf/bd4dd6f316514682bc2d902115305ef0 2024-12-02T23:14:51,741 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/.tmp/cf/bd4dd6f316514682bc2d902115305ef0 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 2024-12-02T23:14:51,743 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/.tmp/cf/4c38af9624654f17b889450939e09497 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 2024-12-02T23:14:51,748 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497, entries=3, sequenceid=6, filesize=5.8 K 2024-12-02T23:14:51,749 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0, entries=47, sequenceid=6, filesize=14.5 K 2024-12-02T23:14:51,749 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 688a3c000ee460cf14e89bc5b111e260 in 85ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 688a3c000ee460cf14e89bc5b111e260: 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. for snaptb0-testExportWithChecksum completed. 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497] hfiles 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T23:14:51,750 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 407a27bcc253c4f32756dbd0df885650 in 86ms, sequenceid=6, compaction requested=false 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 407a27bcc253c4f32756dbd0df885650: 2024-12-02T23:14:51,750 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. for snaptb0-testExportWithChecksum completed. 2024-12-02T23:14:51,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T23:14:51,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:14:51,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0] hfiles 2024-12-02T23:14:51,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T23:14:51,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742337_1513 (size=107) 2024-12-02T23:14:51,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742337_1513 (size=107) 2024-12-02T23:14:51,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742337_1513 (size=107) 2024-12-02T23:14:51,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 
2024-12-02T23:14:51,771 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-02T23:14:51,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-02T23:14:51,772 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:51,772 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:51,774 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 407a27bcc253c4f32756dbd0df885650 in 261 msec 2024-12-02T23:14:51,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742336_1512 (size=107) 2024-12-02T23:14:51,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742336_1512 (size=107) 2024-12-02T23:14:51,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742336_1512 (size=107) 2024-12-02T23:14:51,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
2024-12-02T23:14:51,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-02T23:14:51,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-02T23:14:51,776 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:51,777 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:51,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-02T23:14:51,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 688a3c000ee460cf14e89bc5b111e260 in 266 msec 2024-12-02T23:14:51,779 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:14:51,780 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:14:51,781 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:14:51,781 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:14:51,781 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:14:51,782 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260] hfiles 2024-12-02T23:14:51,782 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 2024-12-02T23:14:51,782 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:14:51,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742338_1514 (size=291) 2024-12-02T23:14:51,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742338_1514 (size=291) 2024-12-02T23:14:51,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742338_1514 (size=291) 2024-12-02T23:14:51,797 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:14:51,797 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-02T23:14:51,798 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T23:14:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742339_1515 (size=951) 2024-12-02T23:14:51,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742339_1515 (size=951) 2024-12-02T23:14:51,815 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742339_1515 (size=951) 2024-12-02T23:14:51,819 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:14:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T23:14:51,825 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:14:51,826 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T23:14:51,828 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:14:51,828 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T23:14:51,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 329 msec 2024-12-02T23:14:52,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T23:14:52,133 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T23:14:52,133 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133 2024-12-02T23:14:52,133 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:52,172 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:14:52,172 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@5011d6cb, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T23:14:52,174 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:14:52,179 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T23:14:52,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:52,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:52,235 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,320 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0008_000001 (auth:SIMPLE) from 127.0.0.1:56202 2024-12-02T23:14:53,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0008/container_1733180955226_0008_01_000001/launch_container.sh] 2024-12-02T23:14:53,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0008/container_1733180955226_0008_01_000001/container_tokens] 2024-12-02T23:14:53,350 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0008/container_1733180955226_0008_01_000001/sysfs] 2024-12-02T23:14:53,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-9462312146706855416.jar 2024-12-02T23:14:53,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-4886655733629009155.jar 2024-12-02T23:14:53,596 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,597 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:14:53,598 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:14:53,599 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:14:53,599 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:14:53,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:14:53,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:14:53,600 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:14:53,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:14:53,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:14:53,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:14:53,601 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:14:53,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:14:53,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:53,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:53,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:53,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:53,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:14:53,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:53,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:14:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742340_1516 (size=131440) 2024-12-02T23:14:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742340_1516 (size=131440) 2024-12-02T23:14:53,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742340_1516 (size=131440) 2024-12-02T23:14:53,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T23:14:53,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T23:14:53,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742341_1517 (size=4188619) 2024-12-02T23:14:53,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T23:14:53,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T23:14:53,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742342_1518 (size=1323991) 2024-12-02T23:14:53,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742343_1519 (size=903926) 2024-12-02T23:14:53,802 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742343_1519 (size=903926) 2024-12-02T23:14:53,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742343_1519 (size=903926) 2024-12-02T23:14:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T23:14:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T23:14:53,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742344_1520 (size=8360083) 2024-12-02T23:14:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T23:14:53,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T23:14:53,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742345_1521 (size=1877034) 2024-12-02T23:14:54,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742346_1522 (size=77835) 2024-12-02T23:14:54,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742346_1522 (size=77835) 2024-12-02T23:14:54,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742346_1522 (size=77835) 2024-12-02T23:14:54,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742347_1523 (size=30949) 2024-12-02T23:14:54,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742347_1523 (size=30949) 2024-12-02T23:14:54,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742347_1523 (size=30949) 2024-12-02T23:14:54,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T23:14:54,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T23:14:54,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742348_1524 (size=1597213) 2024-12-02T23:14:54,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742349_1525 (size=6424743) 2024-12-02T23:14:54,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742349_1525 (size=6424743) 2024-12-02T23:14:54,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742349_1525 (size=6424743) 2024-12-02T23:14:54,824 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T23:14:54,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T23:14:54,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742350_1526 (size=4695811) 2024-12-02T23:14:54,851 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:14:54,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742351_1527 (size=232957) 2024-12-02T23:14:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742351_1527 (size=232957) 2024-12-02T23:14:54,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742351_1527 (size=232957) 2024-12-02T23:14:54,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742352_1528 (size=127628) 2024-12-02T23:14:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742352_1528 (size=127628) 2024-12-02T23:14:54,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742352_1528 (size=127628) 2024-12-02T23:14:55,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T23:14:55,202 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-02T23:14:55,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T23:14:55,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742353_1529 (size=20406) 2024-12-02T23:14:55,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742353_1529 (size=20406) 2024-12-02T23:14:55,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742353_1529 (size=20406) 2024-12-02T23:14:55,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742354_1530 (size=5175431) 2024-12-02T23:14:55,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742354_1530 (size=5175431) 2024-12-02T23:14:55,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742354_1530 (size=5175431) 2024-12-02T23:14:55,626 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742355_1531 (size=217634) 2024-12-02T23:14:55,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742355_1531 (size=217634) 2024-12-02T23:14:55,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742355_1531 (size=217634) 2024-12-02T23:14:55,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742356_1532 (size=1832290) 2024-12-02T23:14:55,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742356_1532 (size=1832290) 2024-12-02T23:14:55,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742356_1532 (size=1832290) 2024-12-02T23:14:55,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742357_1533 (size=322274) 2024-12-02T23:14:55,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742357_1533 (size=322274) 2024-12-02T23:14:55,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742357_1533 (size=322274) 2024-12-02T23:14:55,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742358_1534 (size=503880) 2024-12-02T23:14:55,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742358_1534 (size=503880) 2024-12-02T23:14:55,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742358_1534 (size=503880) 2024-12-02T23:14:55,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742359_1535 (size=29229) 2024-12-02T23:14:55,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742359_1535 (size=29229) 2024-12-02T23:14:55,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742359_1535 (size=29229) 2024-12-02T23:14:55,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742360_1536 (size=24096) 2024-12-02T23:14:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742360_1536 (size=24096) 2024-12-02T23:14:55,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742360_1536 (size=24096) 2024-12-02T23:14:56,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742361_1537 (size=111872) 2024-12-02T23:14:56,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742361_1537 (size=111872) 2024-12-02T23:14:56,016 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742361_1537 (size=111872) 2024-12-02T23:14:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742362_1538 (size=45609) 2024-12-02T23:14:56,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742362_1538 (size=45609) 2024-12-02T23:14:56,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742362_1538 (size=45609) 2024-12-02T23:14:56,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742363_1539 (size=443172) 2024-12-02T23:14:56,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742363_1539 (size=443172) 2024-12-02T23:14:56,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742363_1539 (size=443172) 2024-12-02T23:14:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742364_1540 (size=136454) 2024-12-02T23:14:56,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742364_1540 (size=136454) 2024-12-02T23:14:56,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742364_1540 (size=136454) 2024-12-02T23:14:56,578 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
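Note on the JobResourceUploader warning above ("No job jar file set"): the warning itself points at Job#setJar(String) as the fix. The following is only an illustrative sketch, not code from this test; the class name and jar path are hypothetical, and it simply shows the usual way a MapReduce driver tells the Job which jar contains its user classes.

    // Minimal sketch (not from the test source): avoiding the
    // "No job jar file set" warning by pointing the Job at the jar
    // that carries the user classes. setJarByClass is the common idiom;
    // setJar(String) takes an explicit path instead.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "example-job");
        job.setJarByClass(JobJarExample.class);          // locate the jar containing this class
        // or: job.setJar("/path/to/user-classes.jar");  // hypothetical explicit jar path
      }
    }

In the test harness the warning is expected, since the job classes come from the build's target directories rather than a packaged jar.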
2024-12-02T23:14:56,582 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T23:14:56,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T23:14:56,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T23:14:56,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T23:14:56,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:14:56,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742365_1541 (size=1023) 2024-12-02T23:14:56,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742365_1541 (size=1023) 2024-12-02T23:14:56,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742365_1541 (size=1023) 2024-12-02T23:14:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742366_1542 (size=35) 2024-12-02T23:14:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742366_1542 (size=35) 2024-12-02T23:14:56,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742366_1542 (size=35) 2024-12-02T23:14:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742367_1543 (size=304040) 2024-12-02T23:14:56,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742367_1543 (size=304040) 2024-12-02T23:14:56,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742367_1543 (size=304040) 2024-12-02T23:14:56,695 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:14:56,695 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:14:56,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:58968 2024-12-02T23:15:00,706 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:15:03,599 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T23:15:04,254 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:41970 2024-12-02T23:15:04,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742368_1544 (size=349738) 2024-12-02T23:15:04,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742368_1544 (size=349738) 2024-12-02T23:15:04,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742368_1544 (size=349738) 2024-12-02T23:15:06,675 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:54626 2024-12-02T23:15:06,677 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:54618 2024-12-02T23:15:06,678 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:50388 2024-12-02T23:15:07,376 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:50396 2024-12-02T23:15:07,948 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74f0c13379d206ce1601bf0b945b9ba8, had cached 0 bytes from a total of 5890 2024-12-02T23:15:07,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 53b6b7fd5134f3f13f85c7ab43d0a24d, had cached 0 bytes from a total of 14661 2024-12-02T23:15:10,322 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0009_01_000006 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:14,062 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 53b6b7fd5134f3f13f85c7ab43d0a24d changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:15:14,062 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 407a27bcc253c4f32756dbd0df885650 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:15:14,062 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 74f0c13379d206ce1601bf0b945b9ba8 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:15:14,063 DEBUG [master/9e5ab3b623d7:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 688a3c000ee460cf14e89bc5b111e260 changed from -1.0 to 0.0, refreshing cache 2024-12-02T23:15:15,269 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000005/launch_container.sh] 2024-12-02T23:15:15,269 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000005/container_tokens] 2024-12-02T23:15:15,269 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000005/sysfs] 2024-12-02T23:15:15,464 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:59060 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:16,475 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:59062 2024-12-02T23:15:17,483 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:52626 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:18,366 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0009_01_000009 while processing FINISH_CONTAINERS event 2024-12-02T23:15:18,463 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:52632 2024-12-02T23:15:19,010 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000003/launch_container.sh] 2024-12-02T23:15:19,010 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000003/container_tokens] 2024-12-02T23:15:19,011 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000003/sysfs] 2024-12-02T23:15:21,367 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0009_01_000012 while processing FINISH_CONTAINERS event 2024-12-02T23:15:22,465 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000004/launch_container.sh] 2024-12-02T23:15:22,466 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000004/container_tokens] 2024-12-02T23:15:22,466 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000004/sysfs] 2024-12-02T23:15:22,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000002/launch_container.sh] 2024-12-02T23:15:22,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000002/container_tokens] 2024-12-02T23:15:22,519 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000002/sysfs] 2024-12-02T23:15:23,719 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000007/launch_container.sh] 2024-12-02T23:15:23,720 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000007/container_tokens] 2024-12-02T23:15:23,720 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000007/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:24,300 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000008/launch_container.sh] 2024-12-02T23:15:24,300 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000008/container_tokens] 2024-12-02T23:15:24,300 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000008/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:25,067 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000010/launch_container.sh] 2024-12-02T23:15:25,067 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000010/container_tokens] 2024-12-02T23:15:25,067 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000010/sysfs] 2024-12-02T23:15:25,294 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000011/launch_container.sh] 2024-12-02T23:15:25,294 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000011/container_tokens] 2024-12-02T23:15:25,294 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000011/sysfs] 2024-12-02T23:15:25,509 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:41996 2024-12-02T23:15:25,510 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:41604 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:26,510 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:41998 2024-12-02T23:15:26,510 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:41610 2024-12-02T23:15:29,277 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0009_01_000017 while processing FINISH_CONTAINERS event 2024-12-02T23:15:31,643 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000014/launch_container.sh] 2024-12-02T23:15:31,643 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000014/container_tokens] 2024-12-02T23:15:31,644 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000014/sysfs] 2024-12-02T23:15:31,960 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000013/launch_container.sh] 2024-12-02T23:15:31,960 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000013/container_tokens] 2024-12-02T23:15:31,961 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000013/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650. 
Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:32,529 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:59982 2024-12-02T23:15:32,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:57836 2024-12-02T23:15:33,600 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
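The repeated checksum-mismatch errors above already name the two workarounds when the source and destination filesystems differ (HDFS versus file:// here): file-level checksum comparison via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. A minimal sketch of wiring those options into an ExportSnapshot run; the flags and property are quoted from the error text, while the class name and destination path are illustrative assumptions:

// Hedged sketch of the options suggested by the error message above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumOptions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option 1 (from the error text): use composite file-level CRCs, which remain
    // comparable when block sizes or filesystem types differ.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export",   // illustrative local destination
        // Option 2 (also from the error text): skip verification entirely; the log
        // itself warns this can mask corruption during transfer.
        // "-no-checksum-verify",
    });
    System.exit(rc);
  }
}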
2024-12-02T23:15:33,735 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000016/launch_container.sh] 2024-12-02T23:15:33,735 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000016/container_tokens] 2024-12-02T23:15:33,736 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000016/sysfs] 2024-12-02T23:15:33,820 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000015/launch_container.sh] 2024-12-02T23:15:33,820 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000015/container_tokens] 2024-12-02T23:15:33,820 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000015/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/local-export-1733181292133/archive/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T23:15:35,000 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 407a27bcc253c4f32756dbd0df885650, had cached 0 bytes from a total of 14853 2024-12-02T23:15:35,002 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 688a3c000ee460cf14e89bc5b111e260, had cached 0 bytes from a total of 5906 2024-12-02T23:15:35,542 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:59994 2024-12-02T23:15:35,547 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:57848 2024-12-02T23:15:39,429 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:60004 2024-12-02T23:15:39,430 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:57864 2024-12-02T23:15:39,431 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:57876 2024-12-02T23:15:39,600 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733180955226_0009_01_000021 is : 143 2024-12-02T23:15:39,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742369_1545 (size=51217) 2024-12-02T23:15:39,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742369_1545 (size=51217) 2024-12-02T23:15:39,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742369_1545 (size=51217) 2024-12-02T23:15:39,620 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733180955226_0009_01_000020 is : 143 2024-12-02T23:15:39,638 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000020/launch_container.sh] 2024-12-02T23:15:39,638 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000020/container_tokens] 2024-12-02T23:15:39,639 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_2/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000020/sysfs] 2024-12-02T23:15:39,649 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000021/launch_container.sh] 2024-12-02T23:15:39,649 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000021/container_tokens] 2024-12-02T23:15:39,650 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000021/sysfs] 2024-12-02T23:15:39,651 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000019/launch_container.sh] 2024-12-02T23:15:39,651 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000019/container_tokens] 2024-12-02T23:15:39,651 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000019/sysfs] 2024-12-02T23:15:39,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742370_1546 (size=461) 2024-12-02T23:15:39,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742370_1546 (size=461) 2024-12-02T23:15:39,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742370_1546 (size=461) 2024-12-02T23:15:39,670 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000018/launch_container.sh] 2024-12-02T23:15:39,670 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000018/container_tokens] 2024-12-02T23:15:39,670 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000018/sysfs] 2024-12-02T23:15:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742371_1547 (size=51217) 2024-12-02T23:15:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742371_1547 (size=51217) 2024-12-02T23:15:39,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742371_1547 (size=51217) 2024-12-02T23:15:39,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742372_1548 (size=349738) 2024-12-02T23:15:39,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742372_1548 (size=349738) 2024-12-02T23:15:39,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742372_1548 (size=349738) 2024-12-02T23:15:39,779 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:60016 2024-12-02T23:15:41,068 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733180955226_0009_m_000001 Job failed as tasks 
failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T23:15:41,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070 2024-12-02T23:15:41,070 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:15:41,102 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:15:41,102 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T23:15:41,104 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T23:15:41,110 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T23:15:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742373_1549 (size=156) 2024-12-02T23:15:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742373_1549 (size=156) 2024-12-02T23:15:41,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742373_1549 (size=156) 2024-12-02T23:15:41,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742374_1550 (size=951) 2024-12-02T23:15:41,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742374_1550 (size=951) 2024-12-02T23:15:41,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742374_1550 (size=951) 2024-12-02T23:15:41,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:41,142 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:41,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-2327938889516640237.jar 2024-12-02T23:15:42,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-15008485512964420200.jar 2024-12-02T23:15:42,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:15:42,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:15:42,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:15:42,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:15:42,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:15:42,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:15:42,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:15:42,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:15:42,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:15:42,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:15:42,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:15:42,657 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:15:42,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
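The `mapreduce.TableMapReduceUtil(972)` records above and below show the dependency-jar resolution HBase performs when preparing the export job: for each class the job needs, the containing jar is located (or packaged under `target/test-data`) so it can be shipped with the MapReduce submission. A minimal sketch of how client code typically requests this, assuming a plain `Job` built from the default configuration (the class list and jar paths in the log are produced internally by the utility, not by caller code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarSetup {
      public static Job newJobWithHBaseJars() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Resolves the jar for each HBase/Hadoop class the job depends on and
        // adds it to the job's distributed-cache jar list; this resolution is
        // what emits the "For class X, using jar Y" DEBUG lines in the log.
        TableMapReduceUtil.addDependencyJars(job);
        return job;
      }
    }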
2024-12-02T23:15:42,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:15:42,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:15:42,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:15:42,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:15:42,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:15:42,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:15:42,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742375_1551 (size=131440) 2024-12-02T23:15:42,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742375_1551 (size=131440) 2024-12-02T23:15:42,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742375_1551 (size=131440) 2024-12-02T23:15:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T23:15:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T23:15:42,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742376_1552 (size=4188619) 2024-12-02T23:15:43,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T23:15:43,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T23:15:43,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742377_1553 (size=1323991) 2024-12-02T23:15:43,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37623 is added to blk_1073742378_1554 (size=903926) 2024-12-02T23:15:43,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742378_1554 (size=903926) 2024-12-02T23:15:43,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742378_1554 (size=903926) 2024-12-02T23:15:43,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T23:15:43,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T23:15:43,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742379_1555 (size=8360083) 2024-12-02T23:15:43,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T23:15:43,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T23:15:43,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742380_1556 (size=1877034) 2024-12-02T23:15:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742381_1557 (size=77835) 2024-12-02T23:15:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742381_1557 (size=77835) 2024-12-02T23:15:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742381_1557 (size=77835) 2024-12-02T23:15:44,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742382_1558 (size=443172) 2024-12-02T23:15:44,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742382_1558 (size=443172) 2024-12-02T23:15:44,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742382_1558 (size=443172) 2024-12-02T23:15:44,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742383_1559 (size=30949) 2024-12-02T23:15:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742383_1559 (size=30949) 2024-12-02T23:15:44,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742383_1559 (size=30949) 2024-12-02T23:15:44,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742384_1560 (size=1597213) 2024-12-02T23:15:44,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742384_1560 (size=1597213) 2024-12-02T23:15:44,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742384_1560 (size=1597213) 2024-12-02T23:15:44,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T23:15:44,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T23:15:44,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742385_1561 (size=4695811) 2024-12-02T23:15:44,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742386_1562 (size=232957) 2024-12-02T23:15:44,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742386_1562 (size=232957) 2024-12-02T23:15:44,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742386_1562 (size=232957) 2024-12-02T23:15:44,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742387_1563 (size=127628) 2024-12-02T23:15:44,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742387_1563 (size=127628) 2024-12-02T23:15:44,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742387_1563 (size=127628) 2024-12-02T23:15:44,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742388_1564 (size=20406) 2024-12-02T23:15:44,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742388_1564 (size=20406) 2024-12-02T23:15:44,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742388_1564 (size=20406) 2024-12-02T23:15:44,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742389_1565 (size=5175431) 2024-12-02T23:15:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742389_1565 (size=5175431) 2024-12-02T23:15:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742389_1565 (size=5175431) 2024-12-02T23:15:45,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742390_1566 (size=217634) 2024-12-02T23:15:45,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742390_1566 (size=217634) 2024-12-02T23:15:45,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742390_1566 (size=217634) 2024-12-02T23:15:45,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742391_1567 (size=1832290) 2024-12-02T23:15:45,090 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742391_1567 (size=1832290) 2024-12-02T23:15:45,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742391_1567 (size=1832290) 2024-12-02T23:15:45,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742392_1568 (size=322274) 2024-12-02T23:15:45,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742392_1568 (size=322274) 2024-12-02T23:15:45,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742392_1568 (size=322274) 2024-12-02T23:15:45,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742393_1569 (size=503880) 2024-12-02T23:15:45,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742393_1569 (size=503880) 2024-12-02T23:15:45,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742393_1569 (size=503880) 2024-12-02T23:15:45,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742394_1570 (size=6424743) 2024-12-02T23:15:45,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742394_1570 (size=6424743) 2024-12-02T23:15:45,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742394_1570 (size=6424743) 2024-12-02T23:15:45,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742395_1571 (size=29229) 2024-12-02T23:15:45,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742395_1571 (size=29229) 2024-12-02T23:15:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742395_1571 (size=29229) 2024-12-02T23:15:45,897 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0009_000001 (auth:SIMPLE) from 127.0.0.1:58162 2024-12-02T23:15:45,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742396_1572 (size=24096) 2024-12-02T23:15:45,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742396_1572 (size=24096) 2024-12-02T23:15:45,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742396_1572 (size=24096) 2024-12-02T23:15:45,943 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000001/launch_container.sh] 2024-12-02T23:15:45,943 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000001/container_tokens] 2024-12-02T23:15:45,943 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_3/usercache/jenkins/appcache/application_1733180955226_0009/container_1733180955226_0009_01_000001/sysfs] 2024-12-02T23:15:46,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742397_1573 (size=111872) 2024-12-02T23:15:46,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742397_1573 (size=111872) 2024-12-02T23:15:46,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742397_1573 (size=111872) 2024-12-02T23:15:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742398_1574 (size=45609) 2024-12-02T23:15:46,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742398_1574 (size=45609) 2024-12-02T23:15:46,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742398_1574 (size=45609) 2024-12-02T23:15:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742399_1575 (size=136454) 2024-12-02T23:15:46,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742399_1575 (size=136454) 2024-12-02T23:15:46,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742399_1575 (size=136454) 2024-12-02T23:15:46,845 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
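The `JobResourceUploader` warning just above ("No job jar file set. User classes may not be found. See Job or Job#setJar(String).") is emitted because the submitted job has no job jar configured; the test tolerates this since its classes are already on the minicluster classpath. In ordinary client code the warning is avoided by pointing the job at a jar, for example with `Job#setJarByClass`. A minimal sketch, assuming a hypothetical driver class named `ExportDriver` packaged into the job jar:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportDriver {
      public static Job configureJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "export-with-job-jar");
        // Locates the jar that contains ExportDriver and registers it as the
        // job jar, which prevents the "No job jar file set" warning from
        // JobResourceUploader at submission time.
        job.setJarByClass(ExportDriver.class);
        return job;
      }
    }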
2024-12-02T23:15:46,847 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T23:15:46,850 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.5 K 2024-12-02T23:15:46,850 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-12-02T23:15:46,850 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-12-02T23:15:46,850 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:15:46,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742400_1576 (size=1023) 2024-12-02T23:15:46,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742400_1576 (size=1023) 2024-12-02T23:15:46,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742400_1576 (size=1023) 2024-12-02T23:15:47,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742401_1577 (size=35) 2024-12-02T23:15:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742401_1577 (size=35) 2024-12-02T23:15:47,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742401_1577 (size=35) 2024-12-02T23:15:47,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742402_1578 (size=303994) 2024-12-02T23:15:47,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742402_1578 (size=303994) 2024-12-02T23:15:47,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742402_1578 (size=303994) 2024-12-02T23:15:47,439 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:15:47,439 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T23:15:47,644 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38408 2024-12-02T23:15:52,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74f0c13379d206ce1601bf0b945b9ba8, had cached 0 bytes from a total of 5890 2024-12-02T23:15:52,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 53b6b7fd5134f3f13f85c7ab43d0a24d, had cached 0 bytes from a total of 14661 2024-12-02T23:15:53,844 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:50832 2024-12-02T23:15:54,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742403_1579 (size=349692) 2024-12-02T23:15:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742403_1579 (size=349692) 2024-12-02T23:15:54,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742403_1579 (size=349692) 2024-12-02T23:15:56,100 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:36738 2024-12-02T23:15:56,100 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38124 2024-12-02T23:15:56,983 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38140 2024-12-02T23:15:56,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:36744 2024-12-02T23:15:59,901 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0010_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:16:02,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742404_1580 (size=14853) 2024-12-02T23:16:02,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742404_1580 (size=14853) 2024-12-02T23:16:02,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742404_1580 (size=14853) 2024-12-02T23:16:03,600 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
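The application attempts and container authentications above belong to the MapReduce copy job that ExportSnapshot submits to the MiniMRCluster. The stack trace at the start of this section shows the test driving the tool through `ToolRunner.run`; the same export can be launched that way outside the test harness. A minimal sketch under the assumption that the standard `--snapshot`/`--copy-to` options apply; the `targetUri` value here is illustrative, not the exact export directory used in this run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExport {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative destination; the test writes to an export-<timestamp>
        // directory under its own test-data root on the same HDFS instance.
        String targetUri = "hdfs://localhost:33369/user/jenkins/export-test";
        // ToolRunner parses generic options and invokes ExportSnapshot.run(),
        // which verifies the snapshot, copies the manifest, and submits the
        // MapReduce copy job seen in this log.
        int ret = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportWithChecksum",
            "--copy-to", targetUri });
        System.exit(ret);
      }
    }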
2024-12-02T23:16:06,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742406_1582 (size=5906) 2024-12-02T23:16:06,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742406_1582 (size=5906) 2024-12-02T23:16:06,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742406_1582 (size=5906) 2024-12-02T23:16:06,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742407_1583 (size=8171) 2024-12-02T23:16:06,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742407_1583 (size=8171) 2024-12-02T23:16:06,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742407_1583 (size=8171) 2024-12-02T23:16:06,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742408_1584 (size=5102) 2024-12-02T23:16:06,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742408_1584 (size=5102) 2024-12-02T23:16:06,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742408_1584 (size=5102) 2024-12-02T23:16:06,562 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000004/launch_container.sh] 2024-12-02T23:16:06,562 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000004/container_tokens] 2024-12-02T23:16:06,562 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000004/sysfs] 2024-12-02T23:16:06,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742405_1581 (size=31736) 2024-12-02T23:16:06,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742405_1581 (size=31736) 2024-12-02T23:16:06,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742405_1581 (size=31736) 2024-12-02T23:16:06,635 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000003/launch_container.sh] 2024-12-02T23:16:06,635 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000003/container_tokens] 2024-12-02T23:16:06,635 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000003/sysfs] 2024-12-02T23:16:06,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742409_1585 (size=463) 2024-12-02T23:16:06,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742409_1585 (size=463) 2024-12-02T23:16:06,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742409_1585 (size=463) 2024-12-02T23:16:06,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742410_1586 (size=31736) 2024-12-02T23:16:06,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742410_1586 (size=31736) 2024-12-02T23:16:06,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742410_1586 (size=31736) 2024-12-02T23:16:06,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742411_1587 (size=349692) 2024-12-02T23:16:06,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742411_1587 (size=349692) 2024-12-02T23:16:06,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742411_1587 (size=349692) 2024-12-02T23:16:06,912 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:36354 2024-12-02T23:16:06,924 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38028 2024-12-02T23:16:06,929 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38036 2024-12-02T23:16:08,640 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000002/launch_container.sh] 2024-12-02T23:16:08,640 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000002/container_tokens] 2024-12-02T23:16:08,640 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000002/sysfs] 2024-12-02T23:16:08,717 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:16:08,718 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T23:16:08,782 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-02T23:16:08,782 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:16:08,783 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:16:08,783 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T23:16:08,783 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T23:16:08,784 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T23:16:08,784 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T23:16:08,784 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T23:16:08,784 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181341070/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T23:16:08,792 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportWithChecksum 2024-12-02T23:16:08,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:08,806 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181368805"}]},"ts":"1733181368805"} 2024-12-02T23:16:08,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T23:16:08,814 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-02T23:16:08,814 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-02T23:16:08,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-02T23:16:08,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, UNASSIGN}, {pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, UNASSIGN}] 2024-12-02T23:16:08,819 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, UNASSIGN 2024-12-02T23:16:08,819 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, UNASSIGN 2024-12-02T23:16:08,820 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=688a3c000ee460cf14e89bc5b111e260, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:16:08,820 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=407a27bcc253c4f32756dbd0df885650, regionState=CLOSING, regionLocation=9e5ab3b623d7,38049,1733180945779 2024-12-02T23:16:08,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, UNASSIGN because future has completed 2024-12-02T23:16:08,824 WARN 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=9e5ab3b623d7,41311,1733180945828, table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T23:16:08,830 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:16:08,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:16:08,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, UNASSIGN because future has completed 2024-12-02T23:16:08,846 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=9e5ab3b623d7,38049,1733180945779, table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T23:16:08,847 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:16:08,847 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779}] 2024-12-02T23:16:08,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T23:16:09,000 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:16:09,000 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:16:09,000 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 688a3c000ee460cf14e89bc5b111e260, disabling compactions & flushes 2024-12-02T23:16:09,000 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:16:09,000 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:16:09,000 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
after waiting 0 ms 2024-12-02T23:16:09,000 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 2024-12-02T23:16:09,005 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(122): Close 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:16:09,005 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:16:09,005 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1722): Closing 407a27bcc253c4f32756dbd0df885650, disabling compactions & flushes 2024-12-02T23:16:09,005 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:16:09,006 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:16:09,006 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. after waiting 0 ms 2024-12-02T23:16:09,006 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 2024-12-02T23:16:09,025 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:16:09,026 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:16:09,026 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260. 
2024-12-02T23:16:09,026 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 688a3c000ee460cf14e89bc5b111e260: Waiting for close lock at 1733181369000Running coprocessor pre-close hooks at 1733181369000Disabling compacts and flushes for region at 1733181369000Disabling writes for close at 1733181369000Writing region close event to WAL at 1733181369007 (+7 ms)Running coprocessor post-close hooks at 1733181369026 (+19 ms)Closed at 1733181369026 2024-12-02T23:16:09,029 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:16:09,029 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=688a3c000ee460cf14e89bc5b111e260, regionState=CLOSED 2024-12-02T23:16:09,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:16:09,034 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:16:09,035 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:16:09,035 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650. 
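The DisableTableProcedure and region-close steps above are the server-side work triggered by the test client asking the master to disable, and shortly afterwards delete, `testtb-testExportWithChecksum`. A minimal client-side sketch of that request sequence, assuming the standard Admin API and a cluster reachable through the default configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Submits a DisableTableProcedure on the master; the regions are
          // closed (the UnassignRegionHandler records above) before it returns.
          admin.disableTable(table);
          // Submits a DeleteTableProcedure, which archives the table's HFiles
          // and removes its entries from hbase:meta and the ACL znode, as the
          // HFileArchiver and ZKPermissionWatcher records below show.
          admin.deleteTable(table);
        }
      }
    }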
2024-12-02T23:16:09,035 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1676): Region close journal for 407a27bcc253c4f32756dbd0df885650: Waiting for close lock at 1733181369005Running coprocessor pre-close hooks at 1733181369005Disabling compacts and flushes for region at 1733181369005Disabling writes for close at 1733181369006 (+1 ms)Writing region close event to WAL at 1733181369008 (+2 ms)Running coprocessor post-close hooks at 1733181369034 (+26 ms)Closed at 1733181369035 (+1 ms) 2024-12-02T23:16:09,038 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(157): Closed 407a27bcc253c4f32756dbd0df885650 2024-12-02T23:16:09,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=226 2024-12-02T23:16:09,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=226, state=SUCCESS, hasLock=false; CloseRegionProcedure 688a3c000ee460cf14e89bc5b111e260, server=9e5ab3b623d7,41311,1733180945828 in 205 msec 2024-12-02T23:16:09,040 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=407a27bcc253c4f32756dbd0df885650, regionState=CLOSED 2024-12-02T23:16:09,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779 because future has completed 2024-12-02T23:16:09,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=688a3c000ee460cf14e89bc5b111e260, UNASSIGN in 223 msec 2024-12-02T23:16:09,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-02T23:16:09,065 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 407a27bcc253c4f32756dbd0df885650, server=9e5ab3b623d7,38049,1733180945779 in 215 msec 2024-12-02T23:16:09,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=227, resume processing ppid=225 2024-12-02T23:16:09,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=407a27bcc253c4f32756dbd0df885650, UNASSIGN in 248 msec 2024-12-02T23:16:09,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-02T23:16:09,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 254 msec 2024-12-02T23:16:09,073 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181369073"}]},"ts":"1733181369073"} 2024-12-02T23:16:09,075 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-02T23:16:09,075 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to 
state=DISABLED 2024-12-02T23:16:09,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 284 msec 2024-12-02T23:16:09,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T23:16:09,123 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T23:16:09,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportWithChecksum 2024-12-02T23:16:09,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,125 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-02T23:16:09,128 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=230, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,134 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-02T23:16:09,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,144 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:16:09,144 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650 2024-12-02T23:16:09,144 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T23:16:09,144 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T23:16:09,145 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T23:16:09,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-12-02T23:16:09,146 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T23:16:09,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-02T23:16:09,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache 
from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,148 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,161 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/recovered.edits] 2024-12-02T23:16:09,163 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/recovered.edits] 2024-12-02T23:16:09,187 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/cf/bd4dd6f316514682bc2d902115305ef0 2024-12-02T23:16:09,190 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650/recovered.edits/9.seqid 2024-12-02T23:16:09,191 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/407a27bcc253c4f32756dbd0df885650 2024-12-02T23:16:09,191 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/cf/4c38af9624654f17b889450939e09497 2024-12-02T23:16:09,204 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/recovered.edits/9.seqid to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260/recovered.edits/9.seqid 2024-12-02T23:16:09,205 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportWithChecksum/688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:16:09,205 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-02T23:16:09,205 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-02T23:16:09,206 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-02T23:16:09,210 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241202a6cd36a43b014576b340f7d38e3d2ca2_407a27bcc253c4f32756dbd0df885650 2024-12-02T23:16:09,211 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202412029026115027cb414889ff9ff2f773fc6a_688a3c000ee460cf14e89bc5b111e260 2024-12-02T23:16:09,212 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-02T23:16:09,214 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=230, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,217 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-02T23:16:09,219 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 
2024-12-02T23:16:09,220 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=230, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,220 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-02T23:16:09,220 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181369220"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:09,220 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181369220"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:09,222 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:16:09,223 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 688a3c000ee460cf14e89bc5b111e260, NAME => 'testtb-testExportWithChecksum,,1733181289625.688a3c000ee460cf14e89bc5b111e260.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 407a27bcc253c4f32756dbd0df885650, NAME => 'testtb-testExportWithChecksum,1,1733181289625.407a27bcc253c4f32756dbd0df885650.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:16:09,223 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-02T23:16:09,223 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181369223"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:09,225 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-02T23:16:09,226 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=230, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T23:16:09,228 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 103 msec 2024-12-02T23:16:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-02T23:16:09,252 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-02T23:16:09,252 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T23:16:09,259 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T23:16:09,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-02T23:16:09,262 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: 
"snaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T23:16:09,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-02T23:16:09,297 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=809 (was 813), OpenFileDescriptor=791 (was 790) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1084 (was 968) - SystemLoadAverage LEAK? -, ProcessCount=22 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=1375 (was 1031) - AvailableMemoryMB LEAK? - 2024-12-02T23:16:09,297 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-02T23:16:09,324 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=809, OpenFileDescriptor=791, MaxFileDescriptor=1048576, SystemLoadAverage=1084, ProcessCount=22, AvailableMemoryMB=1371 2024-12-02T23:16:09,324 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-12-02T23:16:09,326 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T23:16:09,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:09,328 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T23:16:09,330 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 231 2024-12-02T23:16:09,330 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T23:16:09,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T23:16:09,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742412_1588 (size=454) 2024-12-02T23:16:09,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742412_1588 (size=454) 2024-12-02T23:16:09,373 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => baf4bd48ffb757582598f40a8d64aa1c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:16:09,374 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4872baa9eb6392c2c49a511c37185e0d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:16:09,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742412_1588 (size=454) 2024-12-02T23:16:09,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742413_1589 (size=79) 2024-12-02T23:16:09,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742413_1589 (size=79) 2024-12-02T23:16:09,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742413_1589 (size=79) 2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 4872baa9eb6392c2c49a511c37185e0d, disabling compactions & flushes 2024-12-02T23:16:09,421 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 
2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. after waiting 0 ms 2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:09,421 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:09,421 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4872baa9eb6392c2c49a511c37185e0d: Waiting for close lock at 1733181369421Disabling compacts and flushes for region at 1733181369421Disabling writes for close at 1733181369421Writing region close event to WAL at 1733181369421Closed at 1733181369421 2024-12-02T23:16:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742414_1590 (size=79) 2024-12-02T23:16:09,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742414_1590 (size=79) 2024-12-02T23:16:09,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742414_1590 (size=79) 2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing baf4bd48ffb757582598f40a8d64aa1c, disabling compactions & flushes 2024-12-02T23:16:09,430 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. after waiting 0 ms 2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,430 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 
2024-12-02T23:16:09,430 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for baf4bd48ffb757582598f40a8d64aa1c: Waiting for close lock at 1733181369430Disabling compacts and flushes for region at 1733181369430Disabling writes for close at 1733181369430Writing region close event to WAL at 1733181369430Closed at 1733181369430 2024-12-02T23:16:09,432 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T23:16:09,432 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733181369432"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181369432"}]},"ts":"1733181369432"} 2024-12-02T23:16:09,432 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733181369432"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733181369432"}]},"ts":"1733181369432"} 2024-12-02T23:16:09,436 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T23:16:09,438 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T23:16:09,438 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181369438"}]},"ts":"1733181369438"} 2024-12-02T23:16:09,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T23:16:09,446 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-02T23:16:09,446 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {9e5ab3b623d7=0} racks are {/default-rack=0} 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T23:16:09,451 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T23:16:09,451 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T23:16:09,451 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T23:16:09,451 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number 
of tables=1, number of hosts=1, number of racks=1 2024-12-02T23:16:09,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, ASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, ASSIGN}] 2024-12-02T23:16:09,455 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, ASSIGN 2024-12-02T23:16:09,455 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,39847,1733180945622; forceNewPlan=false, retain=false 2024-12-02T23:16:09,457 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, ASSIGN 2024-12-02T23:16:09,458 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, ASSIGN; state=OFFLINE, location=9e5ab3b623d7,41311,1733180945828; forceNewPlan=false, retain=false 2024-12-02T23:16:09,609 INFO [9e5ab3b623d7:41513 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T23:16:09,610 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=4872baa9eb6392c2c49a511c37185e0d, regionState=OPENING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:16:09,610 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=baf4bd48ffb757582598f40a8d64aa1c, regionState=OPENING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:16:09,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, ASSIGN because future has completed 2024-12-02T23:16:09,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:16:09,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, ASSIGN because future has completed 2024-12-02T23:16:09,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:16:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T23:16:09,777 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:09,777 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7752): Opening region: {ENCODED => 4872baa9eb6392c2c49a511c37185e0d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T23:16:09,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. service=AccessControlService 2024-12-02T23:16:09,778 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:16:09,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:16:09,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7794): checking encryption for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,778 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7797): checking classloading for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,780 INFO [StoreOpener-4872baa9eb6392c2c49a511c37185e0d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,782 INFO [StoreOpener-4872baa9eb6392c2c49a511c37185e0d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4872baa9eb6392c2c49a511c37185e0d columnFamilyName cf 2024-12-02T23:16:09,783 DEBUG [StoreOpener-4872baa9eb6392c2c49a511c37185e0d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:16:09,784 INFO [StoreOpener-4872baa9eb6392c2c49a511c37185e0d-1 {}] regionserver.HStore(327): Store=4872baa9eb6392c2c49a511c37185e0d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:16:09,784 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1038): replaying wal for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,785 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,785 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,786 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1048): stopping wal replay for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,786 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1060): Cleaning up temporary data for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,787 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1093): writing seq id for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,790 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,790 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7752): Opening region: {ENCODED => baf4bd48ffb757582598f40a8d64aa1c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T23:16:09,790 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. service=AccessControlService 2024-12-02T23:16:09,791 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T23:16:09,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T23:16:09,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7794): checking encryption for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,791 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7797): checking classloading for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,793 INFO [StoreOpener-baf4bd48ffb757582598f40a8d64aa1c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,799 INFO [StoreOpener-baf4bd48ffb757582598f40a8d64aa1c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region baf4bd48ffb757582598f40a8d64aa1c columnFamilyName cf 2024-12-02T23:16:09,801 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:16:09,802 DEBUG [StoreOpener-baf4bd48ffb757582598f40a8d64aa1c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:16:09,802 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1114): Opened 4872baa9eb6392c2c49a511c37185e0d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67001367, jitterRate=-0.0016018301248550415}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:16:09,802 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:09,803 DEBUG 
[RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1006): Region open journal for 4872baa9eb6392c2c49a511c37185e0d: Running coprocessor pre-open hook at 1733181369778Writing region info on filesystem at 1733181369778Initializing all the Stores at 1733181369779 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181369779Cleaning up temporary data from old regions at 1733181369786 (+7 ms)Running coprocessor post-open hooks at 1733181369802 (+16 ms)Region opened successfully at 1733181369803 (+1 ms) 2024-12-02T23:16:09,804 INFO [StoreOpener-baf4bd48ffb757582598f40a8d64aa1c-1 {}] regionserver.HStore(327): Store=baf4bd48ffb757582598f40a8d64aa1c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T23:16:09,805 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1038): replaying wal for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,807 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,809 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,809 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d., pid=234, masterSystemTime=1733181369774 2024-12-02T23:16:09,813 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1048): stopping wal replay for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,814 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1060): Cleaning up temporary data for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,816 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:09,816 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 
2024-12-02T23:16:09,816 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=4872baa9eb6392c2c49a511c37185e0d, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:16:09,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:16:09,823 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1093): writing seq id for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,832 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T23:16:09,833 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1114): Opened baf4bd48ffb757582598f40a8d64aa1c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71431911, jitterRate=0.06441842019557953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T23:16:09,833 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1122): Running coprocessor post-open hooks for baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:09,833 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1006): Region open journal for baf4bd48ffb757582598f40a8d64aa1c: Running coprocessor pre-open hook at 1733181369791Writing region info on filesystem at 1733181369791Initializing all the Stores at 1733181369792 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733181369793 (+1 ms)Cleaning up temporary data from old regions at 1733181369814 (+21 ms)Running coprocessor post-open hooks at 1733181369833 (+19 ms)Region opened successfully at 1733181369833 2024-12-02T23:16:09,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-02T23:16:09,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; OpenRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828 in 211 msec 2024-12-02T23:16:09,840 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c., pid=235, masterSystemTime=1733181369787 2024-12-02T23:16:09,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, ASSIGN in 384 msec 2024-12-02T23:16:09,843 DEBUG [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,843 INFO [RS_OPEN_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,843 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=baf4bd48ffb757582598f40a8d64aa1c, regionState=OPEN, openSeqNum=2, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:16:09,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:16:09,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-02T23:16:09,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; OpenRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, server=9e5ab3b623d7,39847,1733180945622 in 211 msec 2024-12-02T23:16:09,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=231 2024-12-02T23:16:09,854 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, ASSIGN in 398 msec 2024-12-02T23:16:09,855 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T23:16:09,855 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181369855"}]},"ts":"1733181369855"} 2024-12-02T23:16:09,859 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-02T23:16:09,860 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T23:16:09,860 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-02T23:16:09,863 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T23:16:09,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:09,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,873 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,874 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,874 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,875 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:09,876 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 547 msec 2024-12-02T23:16:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T23:16:09,962 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T23:16:09,963 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:09,969 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:09,969 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:09,970 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:16:09,971 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:09,976 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:09,983 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:09,986 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T23:16:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181369986 (current time:1733181369986). 
2024-12-02T23:16:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:16:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T23:16:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:16:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44451e75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:16:09,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:16:09,988 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:16:09,988 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:16:09,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:16:09,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9c9f0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:09,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:16:09,989 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:16:09,990 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:09,991 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:16:09,991 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a27aa6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:09,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:16:09,993 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:16:09,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:09,994 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52032, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:09,995 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:16:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:16:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@432a9275, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:16:09,997 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
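The ClusterIdFetcher / "Got connection registry info: cluster_id" records above come from the master's own short-lived client connection. A possible client-side equivalent, shown only as a sketch under the same standard-configuration assumption, is to read the cluster ID back through ClusterMetrics.

```java
// Sketch only -- an assumed client-side counterpart of the cluster_id records above.
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterIdSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // Prints the same UUID the log shows as cluster_id for this mini cluster.
      System.out.println("cluster_id: " + metrics.getClusterId());
      System.out.println("active master: " + metrics.getMasterName());
    }
  }
}
```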
2024-12-02T23:16:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:16:09,998 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:16:09,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:16:09,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:16:09,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@472086fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:09,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:16:09,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:16:09,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:10,000 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:16:10,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a51341, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:16:10,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:16:10,002 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:10,003 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52036, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:10,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:16:10,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:10,006 
INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43402, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:10,007 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513. 2024-12-02T23:16:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:16:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:10,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T23:16:10,007 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): 
Registry end points refresher loop exited. 2024-12-02T23:16:10,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T23:16:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T23:16:10,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-02T23:16:10,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T23:16:10,011 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:16:10,012 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:16:10,016 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:16:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742415_1591 (size=203) 2024-12-02T23:16:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742415_1591 (size=203) 2024-12-02T23:16:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742415_1591 (size=203) 2024-12-02T23:16:10,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T23:16:10,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T23:16:10,475 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:16:10,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c}, {pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d}] 2024-12-02T23:16:10,476 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:10,477 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:10,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=237 2024-12-02T23:16:10,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=238 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.HRegion(2603): Flush status journal for baf4bd48ffb757582598f40a8d64aa1c: 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.HRegion(2603): Flush status journal for 4872baa9eb6392c2c49a511c37185e0d: 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:16:10,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T23:16:10,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742416_1592 (size=82) 2024-12-02T23:16:10,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742416_1592 (size=82) 2024-12-02T23:16:10,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742416_1592 (size=82) 2024-12-02T23:16:10,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742417_1593 (size=82) 2024-12-02T23:16:10,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:10,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=237 2024-12-02T23:16:10,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742417_1593 (size=82) 2024-12-02T23:16:10,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=237 2024-12-02T23:16:10,637 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:10,637 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:10,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742417_1593 (size=82) 2024-12-02T23:16:10,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 
2024-12-02T23:16:10,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=238 2024-12-02T23:16:10,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=238 2024-12-02T23:16:10,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:10,639 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:10,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c in 163 msec 2024-12-02T23:16:10,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-12-02T23:16:10,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d in 164 msec 2024-12-02T23:16:10,641 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:16:10,642 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:16:10,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T23:16:10,643 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
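The SNAPSHOT_SNAPSHOT_MOB_REGION step and the MobRegionSnapshotPool records above (and the mobdir/ paths later in the log) indicate the test table has a MOB-enabled family "cf". A hedged sketch of how such a table could be declared follows; the family name and the single split at row "1" (giving the two regions seen in the log) are taken from the log, while the MOB threshold value is an assumption.

```java
// Sketch only -- one way a MOB-enabled table like the one in this log could be created.
// Family "cf" and the split key "1" match the log; the MOB threshold is assumed.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(
          TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // cells above the threshold are stored under mobdir/
                  .setMobThreshold(0L)   // assumed value; the test's real threshold is not in this log
                  .build())
              .build(),
          new byte[][] { Bytes.toBytes("1") }); // one split key -> the two regions seen above
    }
  }
}
```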
2024-12-02T23:16:10,643 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:16:10,643 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:16:10,643 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-02T23:16:10,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742418_1594 (size=74) 2024-12-02T23:16:10,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742418_1594 (size=74) 2024-12-02T23:16:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742418_1594 (size=74) 2024-12-02T23:16:10,661 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:16:10,661 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:10,661 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:10,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742419_1595 (size=697) 2024-12-02T23:16:10,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742419_1595 (size=697) 2024-12-02T23:16:10,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742419_1595 (size=697) 2024-12-02T23:16:10,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:16:10,678 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:16:10,679 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:10,684 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:16:10,684 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-02T23:16:10,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 676 msec 2024-12-02T23:16:11,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T23:16:11,152 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T23:16:11,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39847 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:16:11,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41311 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T23:16:11,160 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:11,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,162 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 
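The HBaseTestingUtil records above ("Found 2 regions for table ...", "firstRegionName=...") amount to a plain region lookup. A client-side sketch of the same lookup via RegionLocator is shown below; the table name is copied from the log and everything else is assumed.

```java
// Sketch only -- an assumed client-side equivalent of the
// "Found 2 regions for table ..." / "firstRegionName=..." records above.
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> regions = locator.getAllRegionLocations();
      System.out.println("Found " + regions.size() + " regions for table " + table);
      System.out.println("firstRegionName="
          + regions.get(0).getRegion().getRegionNameAsString());
    }
  }
}
```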
2024-12-02T23:16:11,162 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T23:16:11,164 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:11,168 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:11,173 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T23:16:11,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.3 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T23:16:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733181371175 (current time:1733181371175). 2024-12-02T23:16:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T23:16:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T23:16:11,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T23:16:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fa299c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:16:11,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:16:11,177 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73465504, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:16:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,178 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:16:11,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5452bff4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:16:11,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:16:11,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:11,180 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:11,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:16:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:16:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,181 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:16:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f56c820, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ClusterIdFetcher(90): Going to request 9e5ab3b623d7,41513,-1 for getting cluster id 2024-12-02T23:16:11,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T23:16:11,182 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '681687ad-3859-406f-bb7d-05ef89be9a18' 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "681687ad-3859-406f-bb7d-05ef89be9a18" 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10579f8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [9e5ab3b623d7,41513,-1] 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T23:16:11,183 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,184 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:54918, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T23:16:11,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@684f5c84, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T23:16:11,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T23:16:11,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9e5ab3b623d7,39847,1733180945622, seqNum=-1] 2024-12-02T23:16:11,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:11,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:11,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b., hostname=9e5ab3b623d7,41311,1733180945828, seqNum=2] 2024-12-02T23:16:11,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T23:16:11,189 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43404, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T23:16:11,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513. 
2024-12-02T23:16:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor223.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T23:16:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:16:11,189 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T23:16:11,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T23:16:11,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
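Before registering the snapshot procedure, the master reads the table ACL and logs "Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA]" above. A hedged sketch of inspecting that same entry from a client with AccessControlClient follows; it assumes the AccessController coprocessor is installed, as it evidently is in this test cluster.

```java
// Sketch only -- an assumed client-side way to view the "[jenkins: RWXCA]" ACL entry
// read by PermissionStorage above. Requires the AccessController coprocessor.
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAclSketch {
  // AccessControlClient.getUserPermissions declares Throwable, so propagate it here.
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      List<UserPermission> perms = AccessControlClient.getUserPermissions(
          conn, "testtb-testExportFileSystemStateWithSkipTmp"); // table-name regex
      for (UserPermission p : perms) {
        System.out.println(p); // expected to include jenkins with RWXCA on this table
      }
    }
  }
}
```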
2024-12-02T23:16:11,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T23:16:11,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T23:16:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T23:16:11,192 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T23:16:11,193 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T23:16:11,195 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T23:16:11,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742420_1596 (size=198) 2024-12-02T23:16:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742420_1596 (size=198) 2024-12-02T23:16:11,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742420_1596 (size=198) 2024-12-02T23:16:11,204 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T23:16:11,204 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d}] 2024-12-02T23:16:11,205 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:11,205 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T23:16:11,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39847 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-02T23:16:11,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41311 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-02T23:16:11,356 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:11,356 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:11,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2902): Flushing baf4bd48ffb757582598f40a8d64aa1c 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T23:16:11,357 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2902): Flushing 4872baa9eb6392c2c49a511c37185e0d 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T23:16:11,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c is 71, key is 038c77f67fe86c4853ec3fa693b1c79b/cf:q/1733181371157/Put/seqid=0 2024-12-02T23:16:11,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d is 71, key is 122a20568982e71907b2139bdd43ae4f/cf:q/1733181371159/Put/seqid=0 2024-12-02T23:16:11,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742421_1597 (size=8102) 2024-12-02T23:16:11,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742421_1597 (size=8102) 2024-12-02T23:16:11,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742421_1597 (size=8102) 2024-12-02T23:16:11,404 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
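While SnapshotProcedure pid=239 runs, the RPC handler keeps logging "Checking to see if procedure is done pid=239" on behalf of the waiting client. From the application side that polling is hidden behind a Future, as in the following sketch; the snapshot and table names are copied from the log, the rest (configuration, FLUSH type via the client SnapshotDescription) is assumed.

```java
// Sketch only -- the client-side view of the "Checking to see if procedure is done"
// polling above: Admin.snapshotAsync returns a Future that completes when the
// master's SnapshotProcedure finishes.
import java.util.concurrent.Future;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      Future<Void> done = admin.snapshotAsync(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH));
      done.get(); // blocks until the master reports the snapshot procedure as done
      System.out.println("snapshot completed");
    }
  }
}
```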
2024-12-02T23:16:11,409 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:11,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/.tmp/cf/642442f7747142598bf06025002bc071, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=4872baa9eb6392c2c49a511c37185e0d] 2024-12-02T23:16:11,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/.tmp/cf/642442f7747142598bf06025002bc071 is 220, key is 120b39a80d182a6566ee290139e726c0d/cf:q/1733181371159/Put/seqid=0 2024-12-02T23:16:11,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742422_1598 (size=5171) 2024-12-02T23:16:11,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742422_1598 (size=5171) 2024-12-02T23:16:11,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742422_1598 (size=5171) 2024-12-02T23:16:11,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:16:11,421 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:11,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/.tmp/cf/c465a32213ff4d52ade7df8d79426b3c, 
store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=baf4bd48ffb757582598f40a8d64aa1c] 2024-12-02T23:16:11,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/.tmp/cf/c465a32213ff4d52ade7df8d79426b3c is 220, key is 05266997e8373e43d9b41deeebc993e75/cf:q/1733181371157/Put/seqid=0 2024-12-02T23:16:11,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742423_1599 (size=15311) 2024-12-02T23:16:11,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742423_1599 (size=15311) 2024-12-02T23:16:11,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742423_1599 (size=15311) 2024-12-02T23:16:11,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/.tmp/cf/642442f7747142598bf06025002bc071 2024-12-02T23:16:11,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742424_1600 (size=6176) 2024-12-02T23:16:11,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742424_1600 (size=6176) 2024-12-02T23:16:11,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742424_1600 (size=6176) 2024-12-02T23:16:11,441 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/.tmp/cf/c465a32213ff4d52ade7df8d79426b3c 2024-12-02T23:16:11,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/.tmp/cf/642442f7747142598bf06025002bc071 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071 2024-12-02T23:16:11,446 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071, 
entries=46, sequenceid=6, filesize=15.0 K 2024-12-02T23:16:11,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/.tmp/cf/c465a32213ff4d52ade7df8d79426b3c as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c 2024-12-02T23:16:11,447 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 4872baa9eb6392c2c49a511c37185e0d in 90ms, sequenceid=6, compaction requested=false 2024-12-02T23:16:11,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 4872baa9eb6392c2c49a511c37185e0d: 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071] hfiles 2024-12-02T23:16:11,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,452 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c, entries=4, sequenceid=6, filesize=6.0 K 2024-12-02T23:16:11,453 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for baf4bd48ffb757582598f40a8d64aa1c in 96ms, sequenceid=6, compaction requested=false 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for baf4bd48ffb757582598f40a8d64aa1c: 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c] hfiles 2024-12-02T23:16:11,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742425_1601 (size=121) 2024-12-02T23:16:11,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742425_1601 (size=121) 2024-12-02T23:16:11,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742425_1601 (size=121) 2024-12-02T23:16:11,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 
2024-12-02T23:16:11,461 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-02T23:16:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-02T23:16:11,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:11,462 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:11,464 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4872baa9eb6392c2c49a511c37185e0d in 259 msec 2024-12-02T23:16:11,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742426_1602 (size=121) 2024-12-02T23:16:11,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742426_1602 (size=121) 2024-12-02T23:16:11,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742426_1602 (size=121) 2024-12-02T23:16:11,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 
2024-12-02T23:16:11,470 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/9e5ab3b623d7:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-02T23:16:11,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-02T23:16:11,471 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:11,471 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:11,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-12-02T23:16:11,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure baf4bd48ffb757582598f40a8d64aa1c in 268 msec 2024-12-02T23:16:11,474 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T23:16:11,474 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T23:16:11,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-02T23:16:11,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T23:16:11,475 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T23:16:11,476 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c] hfiles 2024-12-02T23:16:11,476 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:11,476 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:11,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742427_1603 (size=305) 2024-12-02T23:16:11,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742427_1603 (size=305) 2024-12-02T23:16:11,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742427_1603 (size=305) 2024-12-02T23:16:11,496 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T23:16:11,496 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,497 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T23:16:11,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to 
blk_1073742428_1604 (size=1007) 2024-12-02T23:16:11,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742428_1604 (size=1007) 2024-12-02T23:16:11,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742428_1604 (size=1007) 2024-12-02T23:16:11,519 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T23:16:11,527 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T23:16:11,527 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,528 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T23:16:11,528 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T23:16:11,530 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 338 msec 2024-12-02T23:16:11,801 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000005/launch_container.sh] 2024-12-02T23:16:11,801 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000005/container_tokens] 2024-12-02T23:16:11,801 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000005/sysfs] 2024-12-02T23:16:11,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T23:16:11,822 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T23:16:11,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822 2024-12-02T23:16:11,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:33369, tgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822, rawTgtDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822, srcFsUri=hdfs://localhost:33369, srcDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:16:11,851 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:33369, inputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156 2024-12-02T23:16:11,851 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,852 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-02T23:16:11,857 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742429_1605 (size=198) 2024-12-02T23:16:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742429_1605 (size=198) 2024-12-02T23:16:11,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742429_1605 (size=198) 2024-12-02T23:16:11,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742430_1606 (size=1007) 2024-12-02T23:16:11,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742430_1606 (size=1007) 2024-12-02T23:16:11,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742430_1606 (size=1007) 2024-12-02T23:16:11,879 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:11,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:11,880 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,102 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0010_000001 (auth:SIMPLE) from 127.0.0.1:38026 2024-12-02T23:16:13,132 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000001/launch_container.sh] 2024-12-02T23:16:13,132 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000001/container_tokens] 2024-12-02T23:16:13,132 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_3/usercache/jenkins/appcache/application_1733180955226_0010/container_1733180955226_0010_01_000001/sysfs] 2024-12-02T23:16:13,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-15836919434089410468.jar 2024-12-02T23:16:13,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop-600482156994479145.jar 2024-12-02T23:16:13,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-12-02T23:16:13,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T23:16:13,247 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T23:16:13,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T23:16:13,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T23:16:13,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T23:16:13,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T23:16:13,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T23:16:13,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T23:16:13,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T23:16:13,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T23:16:13,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T23:16:13,250 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:16:13,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:16:13,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:16:13,251 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:16:13,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T23:16:13,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:16:13,252 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T23:16:13,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742431_1607 (size=131440) 2024-12-02T23:16:13,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742431_1607 (size=131440) 2024-12-02T23:16:13,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742431_1607 (size=131440) 2024-12-02T23:16:13,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T23:16:13,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T23:16:13,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742432_1608 (size=4188619) 2024-12-02T23:16:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T23:16:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T23:16:13,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742433_1609 (size=1323991) 2024-12-02T23:16:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742434_1610 (size=903926) 2024-12-02T23:16:13,934 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742434_1610 (size=903926) 2024-12-02T23:16:13,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742434_1610 (size=903926) 2024-12-02T23:16:14,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T23:16:14,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T23:16:14,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742435_1611 (size=8360083) 2024-12-02T23:16:14,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T23:16:14,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T23:16:14,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742436_1612 (size=1877034) 2024-12-02T23:16:14,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742437_1613 (size=77835) 2024-12-02T23:16:14,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742437_1613 (size=77835) 2024-12-02T23:16:14,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742437_1613 (size=77835) 2024-12-02T23:16:14,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742438_1614 (size=6424743) 2024-12-02T23:16:14,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742438_1614 (size=6424743) 2024-12-02T23:16:14,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742438_1614 (size=6424743) 2024-12-02T23:16:14,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742439_1615 (size=443172) 2024-12-02T23:16:14,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742439_1615 (size=443172) 2024-12-02T23:16:14,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742439_1615 (size=443172) 2024-12-02T23:16:14,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742440_1616 (size=30949) 2024-12-02T23:16:14,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742440_1616 (size=30949) 2024-12-02T23:16:14,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742440_1616 (size=30949) 2024-12-02T23:16:14,356 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742441_1617 (size=1597213) 2024-12-02T23:16:14,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742441_1617 (size=1597213) 2024-12-02T23:16:14,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742441_1617 (size=1597213) 2024-12-02T23:16:14,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742442_1618 (size=4695811) 2024-12-02T23:16:14,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742442_1618 (size=4695811) 2024-12-02T23:16:14,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742442_1618 (size=4695811) 2024-12-02T23:16:14,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742443_1619 (size=232957) 2024-12-02T23:16:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742443_1619 (size=232957) 2024-12-02T23:16:14,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742443_1619 (size=232957) 2024-12-02T23:16:14,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742444_1620 (size=127628) 2024-12-02T23:16:14,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742444_1620 (size=127628) 2024-12-02T23:16:14,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742444_1620 (size=127628) 2024-12-02T23:16:14,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742445_1621 (size=20406) 2024-12-02T23:16:14,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742445_1621 (size=20406) 2024-12-02T23:16:14,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742445_1621 (size=20406) 2024-12-02T23:16:14,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742446_1622 (size=5175431) 2024-12-02T23:16:14,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742446_1622 (size=5175431) 2024-12-02T23:16:14,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742446_1622 (size=5175431) 2024-12-02T23:16:14,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742447_1623 (size=217634) 2024-12-02T23:16:14,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742447_1623 (size=217634) 2024-12-02T23:16:14,530 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742447_1623 (size=217634) 2024-12-02T23:16:14,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T23:16:14,581 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:16:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T23:16:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742448_1624 (size=1832290) 2024-12-02T23:16:14,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742449_1625 (size=322274) 2024-12-02T23:16:14,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742449_1625 (size=322274) 2024-12-02T23:16:14,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742449_1625 (size=322274) 2024-12-02T23:16:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742450_1626 (size=503880) 2024-12-02T23:16:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742450_1626 (size=503880) 2024-12-02T23:16:14,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742450_1626 (size=503880) 2024-12-02T23:16:15,203 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:15,203 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-02T23:16:15,204 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T23:16:15,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742451_1627 (size=29229) 2024-12-02T23:16:15,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742451_1627 (size=29229) 2024-12-02T23:16:15,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742451_1627 (size=29229) 2024-12-02T23:16:15,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742452_1628 (size=24096) 2024-12-02T23:16:15,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742452_1628 (size=24096) 2024-12-02T23:16:15,350 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742452_1628 (size=24096) 2024-12-02T23:16:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742453_1629 (size=111872) 2024-12-02T23:16:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742453_1629 (size=111872) 2024-12-02T23:16:15,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742453_1629 (size=111872) 2024-12-02T23:16:15,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742454_1630 (size=45609) 2024-12-02T23:16:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742454_1630 (size=45609) 2024-12-02T23:16:15,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742454_1630 (size=45609) 2024-12-02T23:16:15,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742455_1631 (size=136454) 2024-12-02T23:16:15,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742455_1631 (size=136454) 2024-12-02T23:16:15,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742455_1631 (size=136454) 2024-12-02T23:16:15,948 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T23:16:15,951 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-02T23:16:15,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.0 K 2024-12-02T23:16:15,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-12-02T23:16:15,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-12-02T23:16:15,954 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-12-02T23:16:15,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742456_1632 (size=1079) 2024-12-02T23:16:15,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742456_1632 (size=1079) 2024-12-02T23:16:15,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742456_1632 (size=1079) 2024-12-02T23:16:16,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742457_1633 (size=35) 2024-12-02T23:16:16,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742457_1633 (size=35) 2024-12-02T23:16:16,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742457_1633 (size=35) 2024-12-02T23:16:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742458_1634 (size=304166) 2024-12-02T23:16:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742458_1634 (size=304166) 2024-12-02T23:16:16,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742458_1634 (size=304166) 2024-12-02T23:16:16,103 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:16:16,103 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T23:16:16,701 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:49792 2024-12-02T23:16:20,707 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:16:21,333 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-02T23:16:21,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=82.74 KB heapSize=130.91 KB 2024-12-02T23:16:21,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/1a1dd2f6094f447dba92759d6d4b5041 is 209, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d./info:regioninfo/1733181369816/Put/seqid=0 2024-12-02T23:16:21,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742459_1635 (size=18374) 2024-12-02T23:16:21,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742459_1635 (size=18374) 2024-12-02T23:16:21,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742459_1635 (size=18374) 2024-12-02T23:16:21,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=70.64 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/1a1dd2f6094f447dba92759d6d4b5041 2024-12-02T23:16:21,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/a088336815b74d70a426c63143a93bae is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c./ns:/1733181259861/DeleteFamily/seqid=0 2024-12-02T23:16:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742460_1636 (size=8137) 2024-12-02T23:16:21,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742460_1636 (size=8137) 2024-12-02T23:16:21,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742460_1636 (size=8137) 2024-12-02T23:16:21,438 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.57 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/a088336815b74d70a426c63143a93bae 2024-12-02T23:16:21,465 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/4bec7f3956b147a4a2535899620c2ea5 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c./rep_barrier:/1733181259861/DeleteFamily/seqid=0 2024-12-02T23:16:21,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742461_1637 (size=8458) 2024-12-02T23:16:21,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742461_1637 (size=8458) 2024-12-02T23:16:21,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742461_1637 (size=8458) 2024-12-02T23:16:21,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.71 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/4bec7f3956b147a4a2535899620c2ea5 2024-12-02T23:16:21,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/41bfb3093aaf460485c2ab3f9925ca2a is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733181235939.e51dffa61d3cd9bf7afa2719f876bf5c./table:/1733181259861/DeleteFamily/seqid=0 2024-12-02T23:16:21,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742462_1638 (size=9363) 2024-12-02T23:16:21,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742462_1638 (size=9363) 2024-12-02T23:16:21,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742462_1638 (size=9363) 2024-12-02T23:16:21,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.82 KB at sequenceid=227 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/41bfb3093aaf460485c2ab3f9925ca2a 2024-12-02T23:16:21,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/1a1dd2f6094f447dba92759d6d4b5041 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/info/1a1dd2f6094f447dba92759d6d4b5041 2024-12-02T23:16:21,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/info/1a1dd2f6094f447dba92759d6d4b5041, entries=100, sequenceid=227, filesize=17.9 K 2024-12-02T23:16:21,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/a088336815b74d70a426c63143a93bae as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/ns/a088336815b74d70a426c63143a93bae 2024-12-02T23:16:21,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/ns/a088336815b74d70a426c63143a93bae, entries=26, sequenceid=227, filesize=7.9 K 2024-12-02T23:16:21,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/4bec7f3956b147a4a2535899620c2ea5 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/rep_barrier/4bec7f3956b147a4a2535899620c2ea5 2024-12-02T23:16:21,690 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/rep_barrier/4bec7f3956b147a4a2535899620c2ea5, entries=24, sequenceid=227, filesize=8.3 K 2024-12-02T23:16:21,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/41bfb3093aaf460485c2ab3f9925ca2a as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/table/41bfb3093aaf460485c2ab3f9925ca2a 2024-12-02T23:16:21,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/table/41bfb3093aaf460485c2ab3f9925ca2a, entries=42, sequenceid=227, filesize=9.1 K 2024-12-02T23:16:21,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~82.74 KB/84724, heapSize ~130.84 KB/133984, currentSize=0 B/0 for 1588230740 in 365ms, sequenceid=227, compaction requested=false 2024-12-02T23:16:21,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-02T23:16:23,379 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:39372 2024-12-02T23:16:23,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742463_1639 (size=349888) 2024-12-02T23:16:23,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742463_1639 (size=349888) 2024-12-02T23:16:23,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742463_1639 (size=349888) 2024-12-02T23:16:25,639 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:58358 2024-12-02T23:16:25,639 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:36502 2024-12-02T23:16:26,470 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:58368 2024-12-02T23:16:26,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:36510 2024-12-02T23:16:29,106 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1733180955226_0011_01_000006 while processing FINISH_CONTAINERS event 2024-12-02T23:16:31,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742464_1640 (size=15311) 2024-12-02T23:16:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742464_1640 (size=15311) 2024-12-02T23:16:31,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742464_1640 (size=15311) 2024-12-02T23:16:31,723 WARN [ContainersLauncher #2 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000002/launch_container.sh] 2024-12-02T23:16:31,723 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000002/container_tokens] 2024-12-02T23:16:31,723 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_2/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000002/sysfs] 2024-12-02T23:16:32,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742466_1642 (size=6176) 2024-12-02T23:16:32,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742466_1642 (size=6176) 2024-12-02T23:16:32,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742466_1642 (size=6176) 2024-12-02T23:16:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742467_1643 (size=5171) 2024-12-02T23:16:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742467_1643 (size=5171) 2024-12-02T23:16:32,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742467_1643 (size=5171) 2024-12-02T23:16:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742468_1644 (size=8102) 2024-12-02T23:16:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742468_1644 (size=8102) 2024-12-02T23:16:33,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742468_1644 (size=8102) 2024-12-02T23:16:33,149 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000004/launch_container.sh] 2024-12-02T23:16:33,149 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000004/container_tokens] 2024-12-02T23:16:33,149 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000004/sysfs] 2024-12-02T23:16:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742465_1641 (size=31803) 2024-12-02T23:16:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742465_1641 (size=31803) 2024-12-02T23:16:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742465_1641 (size=31803) 2024-12-02T23:16:33,173 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000005/launch_container.sh] 2024-12-02T23:16:33,174 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000005/container_tokens] 2024-12-02T23:16:33,174 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_0/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000005/sysfs] 2024-12-02T23:16:33,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742469_1645 (size=477) 2024-12-02T23:16:33,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742469_1645 (size=477) 2024-12-02T23:16:33,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742469_1645 (size=477) 2024-12-02T23:16:33,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742470_1646 (size=31803) 2024-12-02T23:16:33,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742470_1646 (size=31803) 2024-12-02T23:16:33,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742470_1646 
(size=31803) 2024-12-02T23:16:33,236 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000003/launch_container.sh] 2024-12-02T23:16:33,236 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000003/container_tokens] 2024-12-02T23:16:33,236 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-0_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000003/sysfs] 2024-12-02T23:16:33,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742471_1647 (size=349888) 2024-12-02T23:16:33,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742471_1647 (size=349888) 2024-12-02T23:16:33,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742471_1647 (size=349888) 2024-12-02T23:16:33,285 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:60372 2024-12-02T23:16:33,293 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:35048 2024-12-02T23:16:33,297 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733180955226_0011_000001 (auth:SIMPLE) from 127.0.0.1:60376 2024-12-02T23:16:33,600 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:16:34,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T23:16:34,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T23:16:34,469 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,470 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T23:16:34,470 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T23:16:34,470 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,470 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T23:16:34,470 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T23:16:34,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_71179475_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T23:16:34,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/export-test/export-1733181371822/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T23:16:34,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.3 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T23:16:34,481 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181394480"}]},"ts":"1733181394480"} 2024-12-02T23:16:34,483 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-02T23:16:34,483 INFO 
[PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-02T23:16:34,483 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-02T23:16:34,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, UNASSIGN}, {pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, UNASSIGN}] 2024-12-02T23:16:34,486 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, UNASSIGN 2024-12-02T23:16:34,486 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, UNASSIGN 2024-12-02T23:16:34,487 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=4872baa9eb6392c2c49a511c37185e0d, regionState=CLOSING, regionLocation=9e5ab3b623d7,41311,1733180945828 2024-12-02T23:16:34,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=baf4bd48ffb757582598f40a8d64aa1c, regionState=CLOSING, regionLocation=9e5ab3b623d7,39847,1733180945622 2024-12-02T23:16:34,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, UNASSIGN because future has completed 2024-12-02T23:16:34,492 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:16:34,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828}] 2024-12-02T23:16:34,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, UNASSIGN because future has completed 2024-12-02T23:16:34,493 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T23:16:34,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, 
server=9e5ab3b623d7,39847,1733180945622}] 2024-12-02T23:16:34,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T23:16:34,645 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(122): Close 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:34,645 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1722): Closing 4872baa9eb6392c2c49a511c37185e0d, disabling compactions & flushes 2024-12-02T23:16:34,646 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. after waiting 0 ms 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 2024-12-02T23:16:34,646 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(122): Close baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1722): Closing baf4bd48ffb757582598f40a8d64aa1c, disabling compactions & flushes 2024-12-02T23:16:34,646 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 
after waiting 0 ms 2024-12-02T23:16:34,646 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:34,651 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:16:34,662 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:16:34,662 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T23:16:34,662 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c. 2024-12-02T23:16:34,662 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1676): Region close journal for baf4bd48ffb757582598f40a8d64aa1c: Waiting for close lock at 1733181394646Running coprocessor pre-close hooks at 1733181394646Disabling compacts and flushes for region at 1733181394646Disabling writes for close at 1733181394646Writing region close event to WAL at 1733181394647 (+1 ms)Running coprocessor post-close hooks at 1733181394661 (+14 ms)Closed at 1733181394662 (+1 ms) 2024-12-02T23:16:34,662 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:16:34,662 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d. 
2024-12-02T23:16:34,662 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1676): Region close journal for 4872baa9eb6392c2c49a511c37185e0d: Waiting for close lock at 1733181394645Running coprocessor pre-close hooks at 1733181394645Disabling compacts and flushes for region at 1733181394646 (+1 ms)Disabling writes for close at 1733181394646Writing region close event to WAL at 1733181394647 (+1 ms)Running coprocessor post-close hooks at 1733181394662 (+15 ms)Closed at 1733181394662 2024-12-02T23:16:34,664 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(157): Closed baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:34,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=baf4bd48ffb757582598f40a8d64aa1c, regionState=CLOSED 2024-12-02T23:16:34,665 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(157): Closed 4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:34,666 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=4872baa9eb6392c2c49a511c37185e0d, regionState=CLOSED 2024-12-02T23:16:34,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, server=9e5ab3b623d7,39847,1733180945622 because future has completed 2024-12-02T23:16:34,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828 because future has completed 2024-12-02T23:16:34,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=244 2024-12-02T23:16:34,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=244, state=SUCCESS, hasLock=false; CloseRegionProcedure baf4bd48ffb757582598f40a8d64aa1c, server=9e5ab3b623d7,39847,1733180945622 in 174 msec 2024-12-02T23:16:34,670 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=baf4bd48ffb757582598f40a8d64aa1c, UNASSIGN in 184 msec 2024-12-02T23:16:34,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-02T23:16:34,670 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseRegionProcedure 4872baa9eb6392c2c49a511c37185e0d, server=9e5ab3b623d7,41311,1733180945828 in 176 msec 2024-12-02T23:16:34,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=245, resume processing ppid=243 2024-12-02T23:16:34,672 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4872baa9eb6392c2c49a511c37185e0d, UNASSIGN in 185 msec 2024-12-02T23:16:34,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-02T23:16:34,674 INFO [PEWorker-1 
{}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 189 msec 2024-12-02T23:16:34,675 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733181394675"}]},"ts":"1733181394675"} 2024-12-02T23:16:34,676 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-02T23:16:34,676 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-02T23:16:34,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 200 msec 2024-12-02T23:16:34,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T23:16:34,792 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T23:16:34,792 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.3 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,794 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,795 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=248, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,797 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41311 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T23:16:34,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T23:16:34,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T23:16:34,801 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,802 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T23:16:34,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-02T23:16:34,803 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:34,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:34,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:34,803 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T23:16:34,811 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:34,812 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:34,813 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/recovered.edits] 2024-12-02T23:16:34,813 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf, FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/recovered.edits] 2024-12-02T23:16:34,816 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/cf/c465a32213ff4d52ade7df8d79426b3c 2024-12-02T23:16:34,816 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071 to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/cf/642442f7747142598bf06025002bc071 2024-12-02T23:16:34,829 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c/recovered.edits/9.seqid 2024-12-02T23:16:34,829 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/recovered.edits/9.seqid to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d/recovered.edits/9.seqid 2024-12-02T23:16:34,830 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:34,830 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testtb-testExportFileSystemStateWithSkipTmp/4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:34,830 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-02T23:16:34,830 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-02T23:16:34,831 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-02T23:16:34,833 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d to hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202412021aaf56ca7a1d44cb9b8c90ebe004891a_4872baa9eb6392c2c49a511c37185e0d 2024-12-02T23:16:34,835 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c to 
hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412023b8ed0da29a54ca6b317e8d8d9b8dd82_baf4bd48ffb757582598f40a8d64aa1c 2024-12-02T23:16:34,835 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-02T23:16:34,847 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=248, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,854 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-02T23:16:34,857 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-02T23:16:34,857 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=248, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,858 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-02T23:16:34,858 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181394858"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:34,858 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733181394858"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:34,860 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T23:16:34,860 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => baf4bd48ffb757582598f40a8d64aa1c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733181369325.baf4bd48ffb757582598f40a8d64aa1c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4872baa9eb6392c2c49a511c37185e0d, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T23:16:34,860 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-02T23:16:34,860 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733181394860"}]},"ts":"9223372036854775807"} 2024-12-02T23:16:34,861 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-02T23:16:34,862 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=248, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 70 msec 2024-12-02T23:16:34,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-02T23:16:34,912 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,912 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T23:16:34,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T23:16:34,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.3 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T23:16:34,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:34,943 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=820 (was 809) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39905 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:42630 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (243496960) connection to localhost/127.0.0.1:39905 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 29685) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:49190 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1028403000_1 at /127.0.0.1:49164 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_71179475_22 at /127.0.0.1:47862 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9853 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=802 (was 791) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1140 (was 1084) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 22), AvailableMemoryMB=1062 (was 1371) 2024-12-02T23:16:34,943 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=820 is superior to 500 2024-12-02T23:16:34,943 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-02T23:16:34,954 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64ab2e36{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T23:16:34,957 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@31c68162{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:16:34,957 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:16:34,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e5e644{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T23:16:34,957 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72e92192{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} 2024-12-02T23:16:34,973 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733180955226_0011_01_000001 is : 143 2024-12-02T23:16:34,982 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000001/launch_container.sh] 2024-12-02T23:16:34,982 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000001/container_tokens] 2024-12-02T23:16:34,982 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1258243352/yarn-5640649426/MiniMRCluster_1258243352-localDir-nm-1_1/usercache/jenkins/appcache/application_1733180955226_0011/container_1733180955226_0011_01_000001/sysfs] 2024-12-02T23:16:35,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T23:16:37,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 74f0c13379d206ce1601bf0b945b9ba8, had cached 0 bytes from a total of 5890 
2024-12-02T23:16:37,949 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 53b6b7fd5134f3f13f85c7ab43d0a24d, had cached 0 bytes from a total of 14661 2024-12-02T23:16:38,797 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 44332 2024-12-02T23:16:40,180 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:16:42,502 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a8d1ca9c63bdfff14551a9b4d2eb111b 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-02T23:16:42,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/.tmp/l/e090b58d48494c6c9186e4d8ae1cbf46 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733181259845/DeleteFamily/seqid=0 2024-12-02T23:16:42,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742472_1648 (size=5860) 2024-12-02T23:16:42,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742472_1648 (size=5860) 2024-12-02T23:16:42,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742472_1648 (size=5860) 2024-12-02T23:16:42,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/.tmp/l/e090b58d48494c6c9186e4d8ae1cbf46 2024-12-02T23:16:42,529 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e090b58d48494c6c9186e4d8ae1cbf46 2024-12-02T23:16:42,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/.tmp/l/e090b58d48494c6c9186e4d8ae1cbf46 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/l/e090b58d48494c6c9186e4d8ae1cbf46 2024-12-02T23:16:42,533 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e090b58d48494c6c9186e4d8ae1cbf46 2024-12-02T23:16:42,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/l/e090b58d48494c6c9186e4d8ae1cbf46, entries=14, sequenceid=31, filesize=5.7 K 2024-12-02T23:16:42,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for a8d1ca9c63bdfff14551a9b4d2eb111b in 32ms, sequenceid=31, compaction requested=false 2024-12-02T23:16:42,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a8d1ca9c63bdfff14551a9b4d2eb111b: 2024-12-02T23:16:51,981 INFO [Time-limited test 
{}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c15c38e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T23:16:51,982 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4847c338{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:16:51,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:16:51,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1cb8d8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T23:16:51,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e8faea6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} 2024-12-02T23:17:03,600 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:17:08,990 ERROR [Thread[Thread-398,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T23:17:08,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@75ef03f8{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T23:17:08,991 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ae84ec1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:17:08,991 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:17:08,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c8f1c0d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T23:17:08,991 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a34a45f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} 2024-12-02T23:17:08,994 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-02T23:17:09,000 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-02T23:17:09,000 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-02T23:17:09,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741830_1006 (size=1160855) 2024-12-02T23:17:09,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741830_1006 (size=1160855) 2024-12-02T23:17:09,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741830_1006 (size=1160855) 2024-12-02T23:17:09,005 ERROR [Thread[Thread-422,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T23:17:09,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6e13c6ec{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T23:17:09,010 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@635471c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:17:09,010 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:17:09,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2da1d21b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T23:17:09,010 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f0a6d06{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} 2024-12-02T23:17:09,012 ERROR [Thread[Thread-380,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T23:17:09,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-02T23:17:09,012 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T23:17:09,012 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T23:17:09,012 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:17:09,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,012 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T23:17:09,013 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,013 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T23:17:09,013 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=806974873, stopped=false 2024-12-02T23:17:09,013 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,013 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T23:17:09,013 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9e5ab3b623d7,41513,1733180944669 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:17:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:17:09,174 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T23:17:09,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:17:09,175 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T23:17:09,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:17:09,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:17:09,175 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:17:09,175 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,175 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T23:17:09,175 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9e5ab3b623d7,39847,1733180945622' ***** 2024-12-02T23:17:09,176 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,176 INFO [Time-limited 
test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T23:17:09,176 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9e5ab3b623d7,38049,1733180945779' ***** 2024-12-02T23:17:09,176 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,176 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T23:17:09,176 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9e5ab3b623d7,41311,1733180945828' ***** 2024-12-02T23:17:09,176 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,176 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T23:17:09,176 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T23:17:09,176 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T23:17:09,176 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T23:17:09,176 INFO [RS:2;9e5ab3b623d7:41311 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T23:17:09,176 INFO [RS:0;9e5ab3b623d7:39847 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T23:17:09,176 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T23:17:09,176 INFO [RS:1;9e5ab3b623d7:38049 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T23:17:09,176 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T23:17:09,177 INFO [RS:0;9e5ab3b623d7:39847 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T23:17:09,177 INFO [RS:1;9e5ab3b623d7:38049 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T23:17:09,177 INFO [RS:2;9e5ab3b623d7:41311 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-02T23:17:09,177 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(3091): Received CLOSE for 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:17:09,177 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(3091): Received CLOSE for 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:17:09,177 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(3091): Received CLOSE for a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:17:09,177 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T23:17:09,177 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(959): stopping server 9e5ab3b623d7,39847,1733180945622 2024-12-02T23:17:09,177 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(959): stopping server 9e5ab3b623d7,41311,1733180945828 2024-12-02T23:17:09,177 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(959): stopping server 9e5ab3b623d7,38049,1733180945779 2024-12-02T23:17:09,177 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T23:17:09,177 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T23:17:09,177 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T23:17:09,177 INFO [RS:0;9e5ab3b623d7:39847 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9e5ab3b623d7:39847. 2024-12-02T23:17:09,177 INFO [RS:2;9e5ab3b623d7:41311 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9e5ab3b623d7:41311. 2024-12-02T23:17:09,177 INFO [RS:1;9e5ab3b623d7:38049 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9e5ab3b623d7:38049. 2024-12-02T23:17:09,177 DEBUG [RS:0;9e5ab3b623d7:39847 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:17:09,177 DEBUG [RS:2;9e5ab3b623d7:41311 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:17:09,177 DEBUG [RS:1;9e5ab3b623d7:38049 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:17:09,177 DEBUG [RS:0;9e5ab3b623d7:39847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,177 DEBUG [RS:2;9e5ab3b623d7:41311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,177 DEBUG [RS:1;9e5ab3b623d7:38049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,178 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T23:17:09,178 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T23:17:09,178 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T23:17:09,178 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T23:17:09,178 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1325): Online Regions={53b6b7fd5134f3f13f85c7ab43d0a24d=testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d.} 2024-12-02T23:17:09,178 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 53b6b7fd5134f3f13f85c7ab43d0a24d, disabling compactions & flushes 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 74f0c13379d206ce1601bf0b945b9ba8, disabling compactions & flushes 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a8d1ca9c63bdfff14551a9b4d2eb111b, disabling compactions & flushes 2024-12-02T23:17:09,178 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:17:09,178 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:17:09,178 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. after waiting 0 ms 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. after waiting 0 ms 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. after waiting 0 ms 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 
2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:17:09,178 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T23:17:09,178 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1325): Online Regions={a8d1ca9c63bdfff14551a9b4d2eb111b=hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b.} 2024-12-02T23:17:09,178 DEBUG [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1351): Waiting on a8d1ca9c63bdfff14551a9b4d2eb111b 2024-12-02T23:17:09,178 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1351): Waiting on 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:17:09,178 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T23:17:09,178 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1325): Online Regions={74f0c13379d206ce1601bf0b945b9ba8=testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T23:17:09,178 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T23:17:09,178 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T23:17:09,178 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T23:17:09,178 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=2.96 KB heapSize=5.66 KB 2024-12-02T23:17:09,182 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/acl/a8d1ca9c63bdfff14551a9b4d2eb111b/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-02T23:17:09,183 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/74f0c13379d206ce1601bf0b945b9ba8/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:17:09,183 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): 
Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,183 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:17:09,183 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a8d1ca9c63bdfff14551a9b4d2eb111b: Waiting for close lock at 1733181429178Running coprocessor pre-close hooks at 1733181429178Disabling compacts and flushes for region at 1733181429178Disabling writes for close at 1733181429178Writing region close event to WAL at 1733181429179 (+1 ms)Running coprocessor post-close hooks at 1733181429183 (+4 ms)Closed at 1733181429183 2024-12-02T23:17:09,183 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733180949434.a8d1ca9c63bdfff14551a9b4d2eb111b. 2024-12-02T23:17:09,183 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/cacf9d32c04f4b55b484b26a4ed0770d is 121, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d./info:/1733181394858/DeleteFamily/seqid=0 2024-12-02T23:17:09,227 INFO [regionserver/9e5ab3b623d7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,255 INFO [regionserver/9e5ab3b623d7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,270 INFO [regionserver/9e5ab3b623d7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,378 DEBUG [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1351): Waiting on 53b6b7fd5134f3f13f85c7ab43d0a24d 2024-12-02T23:17:09,378 DEBUG [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 74f0c13379d206ce1601bf0b945b9ba8 2024-12-02T23:17:09,378 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(976): stopping server 9e5ab3b623d7,41311,1733180945828; all regions closed. 2024-12-02T23:17:09,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741834_1010 (size=17949) 2024-12-02T23:17:09,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741834_1010 (size=17949) 2024-12-02T23:17:09,398 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,398 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 
2024-12-02T23:17:09,399 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 74f0c13379d206ce1601bf0b945b9ba8: Waiting for close lock at 1733181429177Running coprocessor pre-close hooks at 1733181429178 (+1 ms)Disabling compacts and flushes for region at 1733181429178Disabling writes for close at 1733181429178Writing region close event to WAL at 1733181429179 (+1 ms)Running coprocessor post-close hooks at 1733181429398 (+219 ms)Closed at 1733181429398 2024-12-02T23:17:09,399 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733181262613.74f0c13379d206ce1601bf0b945b9ba8. 2024-12-02T23:17:09,400 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/default/testExportExpiredSnapshot/53b6b7fd5134f3f13f85c7ab43d0a24d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T23:17:09,400 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,400 INFO [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:17:09,400 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 53b6b7fd5134f3f13f85c7ab43d0a24d: Waiting for close lock at 1733181429177Running coprocessor pre-close hooks at 1733181429178 (+1 ms)Disabling compacts and flushes for region at 1733181429178Disabling writes for close at 1733181429178Writing region close event to WAL at 1733181429182 (+4 ms)Running coprocessor post-close hooks at 1733181429400 (+218 ms)Closed at 1733181429400 2024-12-02T23:17:09,400 DEBUG [RS_CLOSE_REGION-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733181262613.53b6b7fd5134f3f13f85c7ab43d0a24d. 2024-12-02T23:17:09,401 DEBUG [RS:2;9e5ab3b623d7:41311 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs 2024-12-02T23:17:09,401 INFO [RS:2;9e5ab3b623d7:41311 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9e5ab3b623d7%2C41311%2C1733180945828:(num 1733180948209) 2024-12-02T23:17:09,401 DEBUG [RS:2;9e5ab3b623d7:41311 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,401 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.ChoreService(370): Chore service for: regionserver/9e5ab3b623d7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T23:17:09,402 INFO [regionserver/9e5ab3b623d7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T23:17:09,402 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T23:17:09,403 INFO [RS:2;9e5ab3b623d7:41311 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41311 2024-12-02T23:17:09,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742473_1649 (size=5985) 2024-12-02T23:17:09,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742473_1649 (size=5985) 2024-12-02T23:17:09,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742473_1649 (size=5985) 2024-12-02T23:17:09,408 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.05 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/cacf9d32c04f4b55b484b26a4ed0770d 2024-12-02T23:17:09,416 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/cfb507a3b9b643e29f39e22c59583e64 is 119, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d./ns:/1733181394847/DeleteFamily/seqid=0 2024-12-02T23:17:09,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742474_1650 (size=5738) 2024-12-02T23:17:09,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742474_1650 (size=5738) 2024-12-02T23:17:09,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742474_1650 (size=5738) 2024-12-02T23:17:09,423 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=229 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/cfb507a3b9b643e29f39e22c59583e64 2024-12-02T23:17:09,430 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/75ad9942c6ac4b4e807bd545abba0690 is 128, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d./rep_barrier:/1733181394847/DeleteFamily/seqid=0 2024-12-02T23:17:09,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742475_1651 (size=5784) 
2024-12-02T23:17:09,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742475_1651 (size=5784) 2024-12-02T23:17:09,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742475_1651 (size=5784) 2024-12-02T23:17:09,435 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=247 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/75ad9942c6ac4b4e807bd545abba0690 2024-12-02T23:17:09,441 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/ac8e303a2a1241099362649c1e67ed02 is 122, key is testtb-testExportFileSystemStateWithSkipTmp,1,1733181369325.4872baa9eb6392c2c49a511c37185e0d./table:/1733181394847/DeleteFamily/seqid=0 2024-12-02T23:17:09,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9e5ab3b623d7,41311,1733180945828 2024-12-02T23:17:09,497 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T23:17:09,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T23:17:09,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742476_1652 (size=5748) 2024-12-02T23:17:09,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742476_1652 (size=5748) 2024-12-02T23:17:09,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742476_1652 (size=5748) 2024-12-02T23:17:09,516 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=458 B at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/ac8e303a2a1241099362649c1e67ed02 2024-12-02T23:17:09,522 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/info/cacf9d32c04f4b55b484b26a4ed0770d as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/info/cacf9d32c04f4b55b484b26a4ed0770d 2024-12-02T23:17:09,526 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/info/cacf9d32c04f4b55b484b26a4ed0770d, entries=4, sequenceid=239, filesize=5.8 K 2024-12-02T23:17:09,527 
DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/ns/cfb507a3b9b643e29f39e22c59583e64 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/ns/cfb507a3b9b643e29f39e22c59583e64 2024-12-02T23:17:09,533 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/ns/cfb507a3b9b643e29f39e22c59583e64, entries=2, sequenceid=239, filesize=5.6 K 2024-12-02T23:17:09,534 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/rep_barrier/75ad9942c6ac4b4e807bd545abba0690 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/rep_barrier/75ad9942c6ac4b4e807bd545abba0690 2024-12-02T23:17:09,538 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/rep_barrier/75ad9942c6ac4b4e807bd545abba0690, entries=2, sequenceid=239, filesize=5.6 K 2024-12-02T23:17:09,538 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/.tmp/table/ac8e303a2a1241099362649c1e67ed02 as hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/table/ac8e303a2a1241099362649c1e67ed02 2024-12-02T23:17:09,542 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/table/ac8e303a2a1241099362649c1e67ed02, entries=3, sequenceid=239, filesize=5.6 K 2024-12-02T23:17:09,543 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~2.96 KB/3031, heapSize ~5.59 KB/5728, currentSize=0 B/0 for 1588230740 in 365ms, sequenceid=239, compaction requested=false 2024-12-02T23:17:09,547 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/data/hbase/meta/1588230740/recovered.edits/242.seqid, newMaxSeqId=242, maxSeqId=1 2024-12-02T23:17:09,547 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:09,547 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T23:17:09,548 INFO [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): 
Closed hbase:meta,,1.1588230740 2024-12-02T23:17:09,548 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733181429178Running coprocessor pre-close hooks at 1733181429178Disabling compacts and flushes for region at 1733181429178Disabling writes for close at 1733181429178Obtaining lock to block concurrent updates at 1733181429178Preparing flush snapshotting stores in 1588230740 at 1733181429178Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=3031, getHeapSize=5728, getOffHeapSize=0, getCellsCount=23 at 1733181429179 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733181429179Flushing 1588230740/info: creating writer at 1733181429179Flushing 1588230740/info: appending metadata at 1733181429183 (+4 ms)Flushing 1588230740/info: closing flushed file at 1733181429183Flushing 1588230740/ns: creating writer at 1733181429413 (+230 ms)Flushing 1588230740/ns: appending metadata at 1733181429416 (+3 ms)Flushing 1588230740/ns: closing flushed file at 1733181429416Flushing 1588230740/rep_barrier: creating writer at 1733181429427 (+11 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733181429429 (+2 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733181429429Flushing 1588230740/table: creating writer at 1733181429439 (+10 ms)Flushing 1588230740/table: appending metadata at 1733181429440 (+1 ms)Flushing 1588230740/table: closing flushed file at 1733181429440Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15120f1e: reopening flushed file at 1733181429520 (+80 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a6b581e: reopening flushed file at 1733181429527 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f33833a: reopening flushed file at 1733181429533 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@30e70de: reopening flushed file at 1733181429538 (+5 ms)Finished flush of dataSize ~2.96 KB/3031, heapSize ~5.59 KB/5728, currentSize=0 B/0 for 1588230740 in 365ms, sequenceid=239, compaction requested=false at 1733181429543 (+5 ms)Writing region close event to WAL at 1733181429545 (+2 ms)Running coprocessor post-close hooks at 1733181429547 (+2 ms)Closed at 1733181429548 (+1 ms) 2024-12-02T23:17:09,548 DEBUG [RS_CLOSE_META-regionserver/9e5ab3b623d7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T23:17:09,550 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9e5ab3b623d7,41311,1733180945828] 2024-12-02T23:17:09,578 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(976): stopping server 9e5ab3b623d7,38049,1733180945779; all regions closed. 2024-12-02T23:17:09,578 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(976): stopping server 9e5ab3b623d7,39847,1733180945622; all regions closed. 
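The flush of hbase:meta above writes each column family (info, ns, rep_barrier, table) to the region's .tmp directory first, then commits each file into its store directory, and finally records the max sequence id under recovered.edits. As a rough illustration of that write-to-temp-then-rename pattern, here is a minimal sketch using only the plain Hadoop FileSystem API; the paths and file contents are hypothetical and this is not the actual HRegionFileSystem code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    // Hypothetical stand-ins for the .tmp/<family>/<file> and <family>/<file> paths in the log.
    Path tmp = new Path("/demo/region/.tmp/info/flushed-file");
    Path committed = new Path("/demo/region/info/flushed-file");

    // 1. Flush the data to a temporary location first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here\n");
    }

    // 2. Move the finished file into the store directory in a single rename,
    //    so readers only ever see fully written files.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new java.io.IOException("commit failed: " + tmp + " -> " + committed);
    }
  }
}

The rename step roughly corresponds to the "Committing .tmp/... as ..." and "Added ..." records above: the store only picks up the file once the move has succeeded.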
2024-12-02T23:17:09,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741835_1011 (size=14813) 2024-12-02T23:17:09,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741835_1011 (size=14813) 2024-12-02T23:17:09,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741836_1012 (size=100501) 2024-12-02T23:17:09,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741835_1011 (size=14813) 2024-12-02T23:17:09,585 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/WALs/9e5ab3b623d7,38049,1733180945779/9e5ab3b623d7%2C38049%2C1733180945779.1733180948243 not finished, retry = 0 2024-12-02T23:17:09,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741836_1012 (size=100501) 2024-12-02T23:17:09,588 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9e5ab3b623d7,41311,1733180945828 already deleted, retry=false 2024-12-02T23:17:09,589 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9e5ab3b623d7,41311,1733180945828 expired; onlineServers=2 2024-12-02T23:17:09,594 DEBUG [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs 2024-12-02T23:17:09,594 INFO [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9e5ab3b623d7%2C39847%2C1733180945622.meta:.meta(num 1733180948731) 2024-12-02T23:17:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073741833_1009 (size=10337) 2024-12-02T23:17:09,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073741833_1009 (size=10337) 2024-12-02T23:17:09,598 DEBUG [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9e5ab3b623d7%2C39847%2C1733180945622:(num 1733180948195) 2024-12-02T23:17:09,598 DEBUG [RS:0;9e5ab3b623d7:39847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.ChoreService(370): Chore service for: regionserver/9e5ab3b623d7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T23:17:09,598 INFO [RS:0;9e5ab3b623d7:39847 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:39847 2024-12-02T23:17:09,599 INFO 
[regionserver/9e5ab3b623d7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T23:17:09,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9e5ab3b623d7,39847,1733180945622 2024-12-02T23:17:09,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T23:17:09,619 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T23:17:09,620 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9e5ab3b623d7,39847,1733180945622] 2024-12-02T23:17:09,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T23:17:09,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41311-0x101503561da0003, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T23:17:09,652 INFO [RS:2;9e5ab3b623d7:41311 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T23:17:09,652 INFO [RS:2;9e5ab3b623d7:41311 {}] regionserver.HRegionServer(1031): Exiting; stopping=9e5ab3b623d7,41311,1733180945828; zookeeper connection closed. 2024-12-02T23:17:09,652 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@744f3c4d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@744f3c4d 2024-12-02T23:17:09,689 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9e5ab3b623d7,39847,1733180945622 already deleted, retry=false 2024-12-02T23:17:09,689 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9e5ab3b623d7,39847,1733180945622 expired; onlineServers=1 2024-12-02T23:17:09,722 DEBUG [RS:1;9e5ab3b623d7:38049 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/oldWALs 2024-12-02T23:17:09,722 INFO [RS:1;9e5ab3b623d7:38049 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9e5ab3b623d7%2C38049%2C1733180945779:(num 1733180948243) 2024-12-02T23:17:09,722 DEBUG [RS:1;9e5ab3b623d7:38049 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T23:17:09,722 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.ChoreService(370): Chore service for: regionserver/9e5ab3b623d7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T23:17:09,723 INFO [regionserver/9e5ab3b623d7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T23:17:09,723 INFO [RS:1;9e5ab3b623d7:38049 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:38049 2024-12-02T23:17:09,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T23:17:09,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9e5ab3b623d7,38049,1733180945779 2024-12-02T23:17:09,735 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T23:17:09,758 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9e5ab3b623d7,38049,1733180945779] 2024-12-02T23:17:09,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T23:17:09,758 INFO [RS:0;9e5ab3b623d7:39847 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T23:17:09,758 INFO [RS:0;9e5ab3b623d7:39847 {}] regionserver.HRegionServer(1031): Exiting; stopping=9e5ab3b623d7,39847,1733180945622; zookeeper connection closed. 2024-12-02T23:17:09,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39847-0x101503561da0001, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T23:17:09,759 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@e80fac4 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@e80fac4 2024-12-02T23:17:09,773 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9e5ab3b623d7,38049,1733180945779 already deleted, retry=false 2024-12-02T23:17:09,773 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9e5ab3b623d7,38049,1733180945779 expired; onlineServers=0 2024-12-02T23:17:09,774 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9e5ab3b623d7,41513,1733180944669' ***** 2024-12-02T23:17:09,774 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T23:17:09,774 INFO [M:0;9e5ab3b623d7:41513 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T23:17:09,774 INFO [M:0;9e5ab3b623d7:41513 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T23:17:09,774 DEBUG [M:0;9e5ab3b623d7:41513 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T23:17:09,774 DEBUG [M:0;9e5ab3b623d7:41513 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T23:17:09,774 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
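Each regionserver holds an ephemeral znode under /hbase/rs; the ZKWatcher records above show the NodeDeleted events that fire as those znodes vanish during shutdown, which is what lets RegionServerTracker on the master process the expirations. As a rough illustration of that mechanism, here is a minimal watcher using the plain Apache ZooKeeper API (not HBase's ZKWatcher; the class name is hypothetical, and a watch would still have to be registered beforehand, e.g. via ZooKeeper#exists or ZooKeeper#getChildren).

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

public class RsZNodeWatcherSketch implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Fires for events like the "type=NodeDeleted, path=/hbase/rs/<server>" lines above.
    if (event.getType() == Event.EventType.NodeDeleted
        && event.getPath() != null
        && event.getPath().startsWith("/hbase/rs/")) {
      // In HBase, RegionServerTracker reacts here by expiring the named server.
      System.out.println("RegionServer znode deleted: " + event.getPath());
    }
  }
}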
2024-12-02T23:17:09,774 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.large.0-1733180947706 {}] cleaner.HFileCleaner(306): Exit Thread[master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.large.0-1733180947706,5,FailOnTimeoutGroup] 2024-12-02T23:17:09,774 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.small.0-1733180947721 {}] cleaner.HFileCleaner(306): Exit Thread[master/9e5ab3b623d7:0:becomeActiveMaster-HFileCleaner.small.0-1733180947721,5,FailOnTimeoutGroup] 2024-12-02T23:17:09,774 INFO [M:0;9e5ab3b623d7:41513 {}] hbase.ChoreService(370): Chore service for: master/9e5ab3b623d7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T23:17:09,775 INFO [M:0;9e5ab3b623d7:41513 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T23:17:09,775 DEBUG [M:0;9e5ab3b623d7:41513 {}] master.HMaster(1795): Stopping service threads 2024-12-02T23:17:09,775 INFO [M:0;9e5ab3b623d7:41513 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T23:17:09,775 INFO [M:0;9e5ab3b623d7:41513 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T23:17:09,775 INFO [M:0;9e5ab3b623d7:41513 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T23:17:09,775 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T23:17:09,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T23:17:09,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T23:17:09,789 DEBUG [M:0;9e5ab3b623d7:41513 {}] zookeeper.ZKUtil(347): master:41513-0x101503561da0000, quorum=127.0.0.1:57019, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T23:17:09,789 WARN [M:0;9e5ab3b623d7:41513 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T23:17:09,790 INFO [M:0;9e5ab3b623d7:41513 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/.lastflushedseqids 2024-12-02T23:17:09,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37623 is added to blk_1073742477_1653 (size=316) 2024-12-02T23:17:09,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073742477_1653 (size=316) 2024-12-02T23:17:09,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39833 is added to blk_1073742477_1653 (size=316) 2024-12-02T23:17:09,807 INFO [M:0;9e5ab3b623d7:41513 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T23:17:09,808 INFO [M:0;9e5ab3b623d7:41513 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T23:17:09,808 DEBUG 
[M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-02T23:17:09,823 INFO [M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T23:17:09,823 DEBUG [M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T23:17:09,823 DEBUG [M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-02T23:17:09,823 DEBUG [M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-02T23:17:09,823 INFO [M:0;9e5ab3b623d7:41513 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=987.33 KB heapSize=1.16 MB
2024-12-02T23:17:09,824 ERROR [AsyncFSWAL-0-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-02T23:17:09,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T23:17:09,858 INFO [RS:1;9e5ab3b623d7:38049 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T23:17:09,858 INFO [RS:1;9e5ab3b623d7:38049 {}] regionserver.HRegionServer(1031): Exiting; stopping=9e5ab3b623d7,38049,1733180945779; zookeeper connection closed.
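The ERROR a few records above is a NullPointerException in FanOutOneBlockAsyncDFSOutput.buffered(): during shutdown the WAL consumer thread still asks the output how much data is buffered after the underlying ByteBuf has already been released and nulled. Purely as an illustration of that race, and not the actual HBase fix, a null-tolerant accessor would snapshot the field once before dereferencing it:

import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf;

// Hypothetical sketch; the class and field names mirror the log only loosely
// and are not the real FanOutOneBlockAsyncDFSOutput internals.
public class NullSafeBufferedSketch {
  private volatile ByteBuf buf; // released and set to null when the output closes

  long buffered() {
    ByteBuf b = this.buf;                      // read the field exactly once
    return b == null ? 0 : b.readableBytes();  // avoids the NPE seen above
  }

  void close() {
    ByteBuf b = this.buf;
    this.buf = null;                           // publish "closed" before releasing
    if (b != null) {
      b.release();
    }
  }
}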
2024-12-02T23:17:09,858 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38049-0x101503561da0002, quorum=127.0.0.1:57019, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T23:17:09,859 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32eba6ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32eba6ca 2024-12-02T23:17:09,860 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-02T23:17:12,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741836_1012 (size=100501) 2024-12-02T23:17:12,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741833_1009 (size=10337) 2024-12-02T23:17:12,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45149 is added to blk_1073741834_1010 (size=17949) 2024-12-02T23:17:14,700 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T23:17:15,202 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T23:17:20,704 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:17:33,600 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
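At this point the in-process cluster itself is down ("Shutdown of 1 master(s) and 3 regionserver(s) complete"), and the remaining output is the test JVM waiting for the master thread M:0;9e5ab3b623d7:41513 to finish exiting. Thread 22 ("Time-limited test") in the dump below shows where that wait lives: TestExportSnapshot.tearDownAfterClass calling HBaseTestingUtil.shutdownMiniCluster. A teardown of that shape looks roughly like the following sketch (assuming HBaseTestingUtil's no-arg constructor; this is not the actual TestExportSnapshot source):

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class MiniClusterTeardownSketch {
  // Assumed to have started the mini cluster in a @BeforeClass (omitted here).
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Blocks until the master and every regionserver thread has exited; while it
    // waits, the harness keeps printing the periodic thread dumps seen below.
    TEST_UTIL.shutdownMiniCluster();
  }
}

The "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks are produced while that call waits: Thread 22's stack runs through Threads.printThreadInfo, which reads thread state from the JVM's ThreadMXBean. The same kind of periodic dump can be produced with nothing but java.lang.management, as in this sketch (again illustrative, not the HBase utility itself):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public final class PeriodicThreadDumpSketch {
  static void dump() {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    ThreadInfo[] infos = mx.dumpAllThreads(mx.isObjectMonitorUsageSupported(),
        mx.isSynchronizerUsageSupported());
    for (ThreadInfo info : infos) {
      // Mirrors the per-thread fields shown below: name, state, blocked/waited counts, stack.
      System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d%n",
          info.getThreadId(), info.getThreadName(), info.getThreadState(),
          info.getBlockedCount(), info.getWaitedCount());
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    while (true) {
      dump();
      Thread.sleep(60_000L); // one dump per minute, like the log below
    }
  }
}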
2024-12-02T23:18:03,601 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9e5ab3b623d7:41513 240 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@76fbaf48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c43cb9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 2 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 56 Waiting on java.util.concurrent.CountDownLatch$Sync@15a045a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11553 Waited count: 12447 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 20 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@1a817882 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@787c4f77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@179eb2f2): State: TIMED_WAITING Blocked count: 0 Waited count: 1098 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1179866921-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1179866921-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1179866921-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1179866921-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1179866921-41-acceptor-0@6a03c20c-ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1179866921-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1179866921-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1179866921-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-43e3462e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 43 Waited count: 3552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65f8ee46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33369): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@78d40b): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 183 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@61dfd7ba): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 184 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 53458 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1770 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1db26216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33369): State: TIMED_WAITING Blocked count: 68 Waited count: 2720 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33369): State: TIMED_WAITING Blocked count: 55 Waited count: 2716 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33369): State: TIMED_WAITING Blocked count: 58 Waited count: 2709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33369): State: TIMED_WAITING Blocked count: 76 Waited count: 2729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33369): State: TIMED_WAITING Blocked count: 73 Waited count: 2709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@4ffdcc14): State: TIMED_WAITING Blocked count: 0 Waited count: 274 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@e18af89): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3082c3bd): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@a6f2b96): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(438496884)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1912695997-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1912695997-88-acceptor-0@49a135d1-ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1912695997-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1912695997-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5a1751e1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@71eec2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1095 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 35215): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d2f112c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1512 Waited count: 1708 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@391e803c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp482364030-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp482364030-120-acceptor-0@69642822-ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp482364030-121): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp482364030-122): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4eb75e32-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins): State: TIMED_WAITING Blocked count: 1742 Waited count: 1742 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 0 Waited count: 2293 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1c0140e1): State: TIMED_WAITING Blocked count: 0 Waited count: 1094 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 46543): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 326 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4b8e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1516 Waited count: 1700 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@45f506ea): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 551 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp733149406-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp733149406-157-acceptor-0@31434bbb-ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:44295}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp733149406-158): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp733149406-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7c38871e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4)): State: 
TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@5c0d0d7f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@5aa85110[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4c65ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1093 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 38073): State: TIMED_WAITING Blocked count: 1 Waited count: 56 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 2 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@92ba5a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1505 Waited count: 1707 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c5436f6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 547 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@481e06ed[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57019): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 274 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 432 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ecf1793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57019):): State: WAITING Blocked count: 1 Waited count: 525 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4eac5111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 556 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f8c9b01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 245 (LeaseRenewer:jenkins@localhost:33369): State: TIMED_WAITING Blocked count: 16 Waited count: 569 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@164bfb2f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 620 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57019)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) 
app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71dcd4e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 97 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbea4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 
(NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48e0ebd5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 103 Waited count: 369 Waiting on java.util.concurrent.Semaphore$NonfairSync@6aa0e483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 136 Waited count: 683 Waiting on java.util.concurrent.Semaphore$NonfairSync@7d4c66c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513): State: WAITING Blocked count: 134 Waited count: 11311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23fb0aba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e386ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f07b208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d2c588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5f79a23a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42328196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 48 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;9e5ab3b623d7:41513): State: TIMED_WAITING Blocked count: 12 Waited count: 4836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1092/0x00007fc778f97480.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@1f5eab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 182 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5418 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 68 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 54128 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 60 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473cace2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b583fc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 466 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51bf6f00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ef6ef39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (LeaseRenewer:jenkins.hfs.0@localhost:33369): State: TIMED_WAITING Blocked count: 16 Waited count: 568 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 499 (LeaseRenewer:jenkins.hfs.1@localhost:33369): State: TIMED_WAITING Blocked count: 16 Waited count: 570 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 502 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (region-location-0): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 53920 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 899 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 977 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1035 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1041 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1071 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3528c0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1089 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1233 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1234 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1235 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on 
java.util.TaskQueue@31be9f74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1874 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1875 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2211 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 515 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3736 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5544 (java.util.concurrent.ThreadPoolExecutor$Worker@63227a7f[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59593bf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6444 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 182 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6626 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6627 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11430 (AsyncFSWAL-1-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7621343d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11433 (java.util.concurrent.ThreadPoolExecutor$Worker@68e6cd68[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11434 (java.util.concurrent.ThreadPoolExecutor$Worker@7f7ef489[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11437 (java.util.concurrent.ThreadPoolExecutor$Worker@105f7a98[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11440 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11441 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:18:33,601 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:19:03,601 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
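[editor's note] The two FsDatasetAsyncDiskServiceFixer DEBUG records above report a NoSuchFieldException for a field named threadGroup and point at HBASE-27595; per the message, the field is simply absent when the Hadoop version is newer than 3.2.3 / 3.3.4, so the fixer logs and moves on. As a hedged illustration only, the sketch below shows the generic reflective-probe-with-fallback pattern that produces this kind of log line; the class ReflectiveFieldProbe, its method name, and the use of String as the probed class are illustrative assumptions, not the actual HBase or Hadoop code.

import java.lang.reflect.Field;

// Minimal sketch (assumption, not the real HBaseTestingUtil code): look up a
// possibly-private field by name and fall back gracefully when a newer library
// version no longer declares it, logging a DEBUG-style message instead of failing.
public class ReflectiveFieldProbe {

    // Returns the field's value, or null if the field does not exist on this version of the class.
    static Object readFieldOrNull(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName);
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException e) {
            // The field was removed or renamed upstream; report it and continue,
            // which is the shape of the "NoSuchFieldException: threadGroup" lines above.
            System.out.println("DEBUG NoSuchFieldException: " + fieldName
                + "; field not present in this version of " + target.getClass().getName());
            return null;
        } catch (IllegalAccessException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        // String declares no field named "threadGroup", so this exercises the fallback path.
        System.out.println(readFieldOrNull("example", "threadGroup"));
    }
}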
2024-12-02T23:19:05,951 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=28, reuseRatio=73.68% 2024-12-02T23:19:05,951 DEBUG [master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9e5ab3b623d7:41513 233 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@76fbaf48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE 
Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 24 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c43cb9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 2 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6110 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 62 Waiting on java.util.concurrent.CountDownLatch$Sync@546e8778 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11553 Waited count: 12448 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 20 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@1a817882 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@787c4f77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@179eb2f2): State: TIMED_WAITING Blocked count: 0 Waited count: 1218 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1179866921-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1179866921-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1179866921-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1179866921-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1179866921-41-acceptor-0@6a03c20c-ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1179866921-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1179866921-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1179866921-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-43e3462e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 43 Waited count: 3552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65f8ee46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33369): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@78d40b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 203 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@61dfd7ba): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 204 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 59421 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1770 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1db26216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33369): State: TIMED_WAITING Blocked count: 68 Waited count: 2782 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33369): State: TIMED_WAITING Blocked count: 
55 Waited count: 2777 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33369): State: TIMED_WAITING Blocked count: 58 Waited count: 2770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33369): State: TIMED_WAITING Blocked count: 76 Waited count: 2790 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33369): State: TIMED_WAITING Blocked count: 73 Waited count: 2770 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@4ffdcc14): 
State: TIMED_WAITING Blocked count: 0 Waited count: 304 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@e18af89): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3082c3bd): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@a6f2b96): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(438496884)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1912695997-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1912695997-88-acceptor-0@49a135d1-ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1912695997-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1912695997-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5a1751e1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@71eec2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1215 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 35215): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 383 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d2f112c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1532 Waited count: 1748 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@391e803c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 609 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp482364030-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp482364030-120-acceptor-0@69642822-ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp482364030-121): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp482364030-122): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4eb75e32-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins): State: TIMED_WAITING Blocked count: 1802 Waited count: 1802 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 0 Waited count: 2353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1c0140e1): State: TIMED_WAITING Blocked count: 0 Waited count: 1214 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 46543): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4b8e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1536 Waited count: 1740 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@45f506ea): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 611 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp733149406-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp733149406-157-acceptor-0@31434bbb-ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:44295}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp733149406-158): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp733149406-160): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7c38871e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4)): State: 
TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61b84862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@613c19d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@5c0d0d7f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@5aa85110[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4c65ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1213 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 38073): State: TIMED_WAITING Blocked count: 1 Waited count: 62 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 2 Waited count: 362 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@92ba5a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1525 Waited count: 1747 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c5436f6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 607 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c97aad7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@481e06ed[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57019): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 304 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 437 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ecf1793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57019):): State: WAITING Blocked count: 1 Waited count: 530 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4eac5111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 561 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f8c9b01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@164bfb2f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 673 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57019)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71dcd4e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbea4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 
Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48e0ebd5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 103 Waited count: 369 Waiting on java.util.concurrent.Semaphore$NonfairSync@6aa0e483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 136 Waited count: 683 Waiting on java.util.concurrent.Semaphore$NonfairSync@7d4c66c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513): State: WAITING Blocked count: 134 Waited count: 11311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23fb0aba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513): State: WAITING Blocked count: 
0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e386ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f07b208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d2c588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5f79a23a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42328196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 48 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;9e5ab3b623d7:41513): State: TIMED_WAITING Blocked count: 12 Waited count: 4836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1092/0x00007fc778f97480.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@1f5eab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 202 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6018 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 68 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56198c97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 61 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 60132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 60 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473cace2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b583fc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 466 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51bf6f00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 
(regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ef6ef39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 502 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (region-location-0): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 59923 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 899 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 977 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1041 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1041 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1071 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3528c0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1089 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1233 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1234 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1235 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@31be9f74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1874 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1875 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2211 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 515 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5544 (java.util.concurrent.ThreadPoolExecutor$Worker@63227a7f[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59593bf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6444 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 183 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6626 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6627 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11430 (AsyncFSWAL-1-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7621343d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11440 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11441 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:19:14,310 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T23:19:33,601 DEBUG 
[FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:20:03,602 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9e5ab3b623d7:41513 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@76fbaf48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 
27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c43cb9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 2 Waited count: 42 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.CountDownLatch$Sync@69875c16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) 
java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11553 Waited count: 12449 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 20 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@1a817882 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@787c4f77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@179eb2f2): State: TIMED_WAITING Blocked count: 0 Waited count: 1338 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1179866921-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1179866921-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1179866921-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1179866921-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1179866921-41-acceptor-0@6a03c20c-ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1179866921-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1179866921-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1179866921-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-43e3462e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 43 Waited count: 3552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65f8ee46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33369): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@78d40b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 223 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@61dfd7ba): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING 
Blocked count: 0 Waited count: 224 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 65386 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1770 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1db26216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33369): State: TIMED_WAITING Blocked count: 68 Waited count: 2843 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33369): State: TIMED_WAITING Blocked count: 
55 Waited count: 2837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33369): State: TIMED_WAITING Blocked count: 58 Waited count: 2831 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33369): State: TIMED_WAITING Blocked count: 76 Waited count: 2851 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33369): State: TIMED_WAITING Blocked count: 73 Waited count: 2831 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@4ffdcc14): 
State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@e18af89): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3082c3bd): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@a6f2b96): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(438496884)): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1912695997-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1912695997-88-acceptor-0@49a135d1-ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1912695997-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1912695997-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5a1751e1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@71eec2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1335 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 35215): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 403 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d2f112c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1552 Waited count: 1788 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@391e803c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 669 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 669 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp482364030-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp482364030-120-acceptor-0@69642822-ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp482364030-121): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp482364030-122): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4eb75e32-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins): State: TIMED_WAITING Blocked count: 1862 Waited count: 1862 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 0 Waited count: 2413 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1c0140e1): State: TIMED_WAITING Blocked count: 0 Waited count: 1334 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 46543): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4b8e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1556 Waited count: 1780 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@45f506ea): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 670 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 672 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp733149406-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp733149406-157-acceptor-0@31434bbb-ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:44295}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp733149406-158): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp733149406-160): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7c38871e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4)): State: 
TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61b84862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@613c19d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@5c0d0d7f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@5aa85110[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4c65ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 38073): State: TIMED_WAITING Blocked count: 1 Waited count: 68 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 2 Waited count: 382 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@92ba5a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1545 Waited count: 1787 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c5436f6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 667 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c97aad7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@481e06ed[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57019): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 334 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 441 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ecf1793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57019):): State: WAITING Blocked count: 1 Waited count: 534 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4eac5111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 565 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f8c9b01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@164bfb2f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57019)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71dcd4e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbea4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 
Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48e0ebd5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 103 Waited count: 369 Waiting on java.util.concurrent.Semaphore$NonfairSync@6aa0e483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 136 Waited count: 683 Waiting on java.util.concurrent.Semaphore$NonfairSync@7d4c66c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513): State: WAITING Blocked count: 134 Waited count: 11311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23fb0aba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513): State: WAITING Blocked count: 
0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e386ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f07b208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d2c588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5f79a23a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42328196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 48 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;9e5ab3b623d7:41513): State: TIMED_WAITING Blocked count: 12 Waited count: 4836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1092/0x00007fc778f97480.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@1f5eab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 222 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6618 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 68 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56198c97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 67 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 66133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 60 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473cace2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b583fc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 466 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51bf6f00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 
(regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ef6ef39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 502 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (region-location-0): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 65924 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 899 Waiting on java.util.concurrent.ForkJoinPool@7623d59f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 977 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1041 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1071 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3528c0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1089 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1233 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1234 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1235 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@31be9f74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1874 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1875 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2211 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 5544 (java.util.concurrent.ThreadPoolExecutor$Worker@63227a7f[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59593bf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6626 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6627 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11430 (AsyncFSWAL-1-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7621343d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11441 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11446 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T23:20:33,602 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:21:03,602 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;9e5ab3b623d7:41513 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 19 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@76fbaf48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: WAITING Blocked count: 1 Waited count: 30 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c43cb9e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: RUNNABLE Blocked count: 2 Waited count: 45 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7309 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 74 Waiting on java.util.concurrent.CountDownLatch$Sync@6ccaa1ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11553 Waited count: 12450 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 20 Waited count: 17 Waiting on java.lang.ref.ReferenceQueue$Lock@1a817882 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@787c4f77 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@179eb2f2): State: TIMED_WAITING Blocked count: 0 Waited count: 1458 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1179866921-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1179866921-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1179866921-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1179866921-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1179866921-41-acceptor-0@6a03c20c-ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1179866921-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1179866921-43): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1179866921-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-43e3462e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 43 Waited count: 3552 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@65f8ee46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 33369): State: TIMED_WAITING Blocked count: 1 Waited 
count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@78d40b): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@61dfd7ba): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 244 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 71347 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1770 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1db26216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 33369): State: TIMED_WAITING Blocked count: 68 Waited count: 2904 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 33369): State: TIMED_WAITING Blocked count: 55 Waited count: 2898 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 33369): State: TIMED_WAITING Blocked count: 58 Waited count: 2892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 33369): State: TIMED_WAITING Blocked count: 76 Waited count: 2912 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 33369): State: TIMED_WAITING Blocked count: 73 Waited count: 2892 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@4ffdcc14): State: TIMED_WAITING Blocked count: 0 Waited count: 364 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@e18af89): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3082c3bd): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@a6f2b96): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(438496884)): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1912695997-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1912695997-88-acceptor-0@49a135d1-ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1912695997-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1912695997-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-5a1751e1-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@71eec2ec): State: TIMED_WAITING Blocked count: 0 Waited count: 1455 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 35215): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 423 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2d2f112c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1572 Waited count: 1828 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@391e803c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 35215): State: TIMED_WAITING Blocked count: 0 Waited count: 729 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 118 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (qtp482364030-119): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (qtp482364030-120-acceptor-0@69642822-ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp482364030-121): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp482364030-122): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-4eb75e32-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins): State: TIMED_WAITING Blocked count: 1922 Waited count: 1922 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (IPC Parameter Sending Thread for localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 0 Waited count: 2473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1c0140e1): State: TIMED_WAITING Blocked count: 0 Waited count: 1454 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 46543): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 3 Waited count: 386 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4b8e8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1576 Waited count: 1820 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@45f506ea): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 730 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 732 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 46543): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp733149406-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp733149406-157-acceptor-0@31434bbb-ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:44295}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp733149406-158): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp733149406-160): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7c38871e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3)): State: TIMED_WAITING Blocked count: 6 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1)): State: TIMED_WAITING Blocked count: 11 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4)): State: TIMED_WAITING Blocked count: 8 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2)): State: TIMED_WAITING 
Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 171 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 174 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61b84862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@613c19d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (java.util.concurrent.ThreadPoolExecutor$Worker@5c0d0d7f[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (java.util.concurrent.ThreadPoolExecutor$Worker@5aa85110[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4c65ffca): State: TIMED_WAITING Blocked count: 0 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 200 (IPC Server idle connection scanner for port 38073): State: TIMED_WAITING Blocked count: 1 Waited count: 74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 202 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (Command processor): State: WAITING Blocked count: 2 Waited count: 402 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@92ba5a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 206 (BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369): State: TIMED_WAITING Blocked count: 1565 Waited count: 1827 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@c5436f6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 198 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 208 (IPC Server handler 0 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 209 (IPC Server handler 1 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 2 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 3 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 728 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 4 on default port 38073): State: TIMED_WAITING Blocked count: 0 Waited count: 727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 222 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c97aad7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (java.util.concurrent.ThreadPoolExecutor$Worker@481e06ed[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57019): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 364 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 445 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6ecf1793 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57019):): State: WAITING Blocked count: 1 Waited count: 538 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4eac5111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 569 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f8c9b01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@164bfb2f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 1 Waited count: 779 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57019)): State: RUNNABLE Blocked count: 32 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 2 Waited count: 61 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@71dcd4e4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 17 Waited count: 74 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbea4e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 0 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 0 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e1736 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@48e0ebd5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 103 Waited count: 369 Waiting on java.util.concurrent.Semaphore$NonfairSync@6aa0e483 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513): State: WAITING Blocked count: 136 Waited count: 683 Waiting on java.util.concurrent.Semaphore$NonfairSync@7d4c66c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513): State: WAITING Blocked count: 134 Waited count: 11311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23fb0aba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6c0ae353 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1e386ad Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1f07b208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41513): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@64d2c588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41513): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@5f79a23a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42328196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 309 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 329 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 48 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;9e5ab3b623d7:41513): State: TIMED_WAITING Blocked count: 12 Waited count: 4836 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1092/0x00007fc778f97480.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 350 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 352 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 354 (master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 356 (org.apache.hadoop.hdfs.PeerCache@1f5eab6a): State: TIMED_WAITING Blocked count: 0 Waited count: 242 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 373 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 7217 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 388 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 68 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 389 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 72 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 169 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@56198c97 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 402 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 72135 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 60 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 424 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 440 (RegionServerTracker-0): State: WAITING Blocked count: 10 Waited count: 15 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@473cace2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 11 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4b583fc8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 466 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51bf6f00 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/9e5ab3b623d7:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ef6ef39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 502 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 510 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 511 (region-location-0): State: WAITING Blocked count: 8 Waited count: 16 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 401 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 71926 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 520 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 544 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 900 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 569 (region-location-1): State: WAITING Blocked count: 7 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 570 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 977 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1053 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1041 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1071 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1084 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 69 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3528c0a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1088 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1089 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1090 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1233 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1234 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1235 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1286 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1287 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1291 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1292 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1646 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@31be9f74 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1874 (region-location-3): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1875 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c8f03b9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5544 (java.util.concurrent.ThreadPoolExecutor$Worker@63227a7f[State = -1, empty queue]): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@59593bf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6626 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6627 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11430 (AsyncFSWAL-1-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7621343d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11446 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T23:21:33,602 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T23:21:58,510 DEBUG [Time-limited test {}] hbase.LocalHBaseCluster(398): Interrupted java.lang.InterruptedException: null at java.lang.Object.wait(Native Method) ~[?:?] at java.lang.Thread.join(Thread.java:1307) ~[?:?] at org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:111) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T23:21:58,517 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30354aa5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T23:21:58,518 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66129f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:21:58,518 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:21:58,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186ac0d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T23:21:58,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@467ac91b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} 2024-12-02T23:21:58,520 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T23:21:58,520 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-217505668-172.17.0.3-1733180940242 (Datanode Uuid fbe7c001-5e56-4901-a413-ab6e43bc91a6) service to localhost/127.0.0.1:33369 2024-12-02T23:21:58,521 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-02T23:21:58,521 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] ====> TEST TIMED OUT. PRINTING THREAD DUMP. 
<==== Timestamp: 2024-12-02 11:21:58,514 "region-location-0" daemon prio=5 tid=511 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/9e5ab3b623d7:0.procedureResultReporter" daemon prio=5 tid=463 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "pool-18-thread-1" prio=5 tid=86 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@3082c3bd" daemon prio=5 tid=73 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@a6f2b96" daemon prio=5 tid=74 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 33369" daemon prio=5 tid=64 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@71eec2ec" daemon prio=5 tid=93 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-4-1" prio=10 tid=126 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=55 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "IPC Server listener on 0" daemon prio=5 tid=128 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "MiniHBaseClusterRegionServer-EventLoopGroup-4-1" daemon prio=10 tid=309 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
2024-12-02T23:21:58,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-12-02T23:21:58,522 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
"IPC Server handler 3 on default port 33369" daemon prio=5 tid=67 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Time-limited test" daemon prio=5 tid=22 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.start0(Native Method) at java.base@17.0.11/java.lang.Thread.start(Thread.java:809) at app//io.netty.util.concurrent.ThreadPerTaskExecutor.execute(ThreadPerTaskExecutor.java:32) at app//io.netty.util.internal.ThreadExecutorMap$1.execute(ThreadExecutorMap.java:57) at app//io.netty.util.concurrent.SingleThreadEventExecutor.doStartThread(SingleThreadEventExecutor.java:983) at app//io.netty.util.concurrent.SingleThreadEventExecutor.ensureThreadStarted(SingleThreadEventExecutor.java:966) at app//io.netty.util.concurrent.SingleThreadEventExecutor.shutdownGracefully(SingleThreadEventExecutor.java:660) at app//io.netty.util.concurrent.MultithreadEventExecutorGroup.shutdownGracefully(MultithreadEventExecutorGroup.java:163) at app//io.netty.util.concurrent.AbstractEventExecutorGroup.shutdownGracefully(AbstractEventExecutorGroup.java:70) at 
app//org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer.close(DatanodeHttpServer.java:338) at app//org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:2573) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNode(MiniDFSCluster.java:2232) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:2222) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2201) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) at app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) at app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=171 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-2-1" prio=10 tid=92 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RequestThrottler" daemon prio=5 tid=242 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) "Socket Reader #1 for port 0" daemon prio=5 tid=129 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=132 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(Abst2024-12-02T23:21:58,523 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func ractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@391e803c" daemon prio=5 tid=85 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HBase-Metrics2-1" daemon prio=5 tid=255 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-11" daemon prio=5 tid=1286 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 33369" daemon prio=5 tid=65 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=352 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1179866921-37" daemon prio=5 tid=37 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-14" daemon prio=5 tid=1292 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=131 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "Container metrics unregistration" daemon prio=5 tid=1646 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at 
java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "IPC Server handler 2 on default port 33369" daemon prio=5 tid=66 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "FSEditLogAsync" daemon prio=5 tid=53 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4)" daemon prio=5 tid=164 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "FsDatasetAsyncDiskServiceFixer" daemon prio=5 tid=234 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) "RPCClient-NioEventLoopGroup-6-9" daemon prio=5 tid=1235 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1179866921-40" daemon prio=5 tid=40 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1912695997-87" daemon prio=5 tid=87 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-7-2" prio=10 tid=11452 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Monitor thread for TaskMonitor" daemon prio=5 tid=350 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=94 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "IPC Server handler 1 on default port 38073" daemon prio=5 tid=209 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Session-HouseKeeper-5a1751e1-1" prio=5 tid=91 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MarkedDeleteBlockScrubberThread" daemon prio=5 tid=48 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "CacheReplicationMonitor(438496884)" daemon prio=5 tid=75 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at 
app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) "NIOServerCxnFactory.SelectorThread-1" daemon prio=5 tid=237 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "Idle-Rpc-Conn-Sweeper-pool-0" daemon prio=5 tid=403 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-26-thread-1" prio=5 tid=118 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@1c0140e1" daemon prio=5 tid=127 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@45f506ea" daemon prio=5 tid=117 runnable java.lang.Thread.State: 
RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:21:58,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@277f3f27{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} "SSL Certificates Store Monitor" daemon prio=5 tid=25 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "qtp1912695997-88-acceptor-0@49a135d1-ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:35071}" daemon prio=3 tid=88 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 35215" daemon prio=5 tid=108 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@e18af89" daemon prio=5 tid=72 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=198 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "RPCClient-NioEventLoopGroup-6-2" daemon prio=5 tid=520 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.PeerCache@1f5eab6a" daemon prio=5 tid=356 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) at app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) at app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=41513" daemon prio=5 tid=277 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=174 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Signal Dispatcher" daemon prio=9 tid=4 runnable java.lang.Thread.State: RUNNABLE "M:0;9e5ab3b623d7:41513" daemon prio=5 tid=286 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1092/0x00007fc778f97480.run(Unknown Source) at app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) at app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) at app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) at app//org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) at app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) at app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) at app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T23:21:58,525 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T23:21:58,525 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T23:21:58,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e56013e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} "qtp733149406-157" daemon prio=5 tid=157 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "surefire-forkedjvm-stream-flusher" daemon prio=5 tid=16 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 46543" daemon prio=5 tid=130 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "IPC Server handler 0 on default port 35215" daemon prio=5 tid=104 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "weak-ref-cleaner-strictcontextstorage" daemon prio=1 tid=254 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-15-thread-1" daemon prio=5 tid=188 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Timer for 'JobHistoryServer' metrics system" daemon prio=5 tid=11446 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Command processor" daemon prio=5 tid=205 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "Block report processor" daemon prio=5 tid=51 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) 2024-12-02T23:21:58,525 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f51a37c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED} "Hadoop-Metrics-Updater-0" daemon prio=5 tid=98 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 38073" daemon prio=5 tid=211 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Socket Reader #1 for port 0" daemon prio=5 tid=199 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "NIOWorkerThread-1" daemon prio=5 tid=243 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=41513" daemon prio=5 tid=276 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RPCClient-NioEventLoopGroup-6-8" daemon prio=5 tid=1234 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-4" daemon prio=5 tid=261 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "ProcessThread(sid:0 cport:57019):" daemon prio=5 tid=241 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) 2024-12-02T23:21:58,527 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T23:21:58,527 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.BPServiceActor(925): 
Ending block pool service for: Block pool BP-217505668-172.17.0.3-1733180940242 (Datanode Uuid 77e55a7f-d7ba-46c0-963f-8728d50b2740) service to localhost/127.0.0.1:33369 "NIOWorkerThread-8" daemon prio=5 tid=266 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-3" daemon prio=5 tid=260 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcClient-timer-pool-0" daemon prio=5 tid=402 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-23-thread-1" daemon prio=5 tid=189 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=41513" daemon prio=5 tid=282 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "qtp1179866921-43" daemon prio=5 tid=43 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SyncThread:0" daemon prio=5 tid=240 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) "qtp1179866921-42" daemon prio=5 tid=42 
timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
2024-12-02T23:21:58,527 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T23:21:58,527 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T23:21:58,527 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
"NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57019" daemon prio=5 tid=238 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) "pool-36-thread-1" prio=5 tid=155 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-5" daemon prio=5 tid=1088 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RedundancyMonitor" daemon prio=5 tid=47 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) at java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@63227a7f[State = -1, empty queue]" daemon prio=5 tid=5544 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-7-1" prio=10 tid=11451 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41513" daemon prio=5 tid=281 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "region-location-3" daemon prio=5 tid=1874 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
2024-12-02T23:21:58,528 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T23:21:58,528 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 
"qtp1912695997-89" daemon prio=5 tid=89 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-16" daemon prio=5 tid=6627 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@61dfd7ba" daemon prio=5 tid=49 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SessionTracker" daemon prio=5 tid=239 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) "surefire-forkedjvm-command-thread" daemon prio=5 tid=18 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) at java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) at app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) at app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) at app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) at app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) at app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=41513" daemon prio=5 tid=285 in Object.wait() java.lang.Thread.State: WAITING (on object 
monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Parameter Sending Thread for localhost/127.0.0.1:33369" daemon prio=5 tid=124 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data4/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=176 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1912695997-90" daemon prio=5 tid=90 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 35215" daemon prio=5 tid=96 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=58 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 35215" daemon prio=5 tid=106 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "qtp733149406-158" daemon prio=5 tid=158 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 33369" daemon prio=5 tid=56 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "master:store-WAL-Roller" daemon prio=5 tid=373 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) "RPCClient-NioEventLoopGroup-6-3" daemon prio=5 tid=525 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-1" daemon prio=5 tid=569 in Object.wait() java.lang.Thread.State: WAITING (on 
object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Reference Handler" daemon prio=10 tid=2 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) at java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) at java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) "qtp482364030-122" daemon prio=5 tid=122 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 46543" daemon prio=5 tid=142 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "MiniHBaseClusterRegionServer-EventLoopGroup-5-2" daemon prio=10 tid=388 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-2" daemon prio=5 tid=570 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1179866921-44" daemon prio=5 tid=44 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@5c0d0d7f[State = -1, empty queue]" daemon prio=5 tid=192 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data5/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=222 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.SelectorThread-0" daemon prio=5 tid=236 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41513" daemon prio=5 tid=280 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Server handler 0 on default port 46543" daemon prio=5 tid=139 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "pool-1-thread-2" daemon prio=5 tid=15 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=135 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "IPC Server handler 1 on default port 46543" daemon prio=5 tid=140 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server listener on 0" daemon prio=5 tid=54 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369" daemon prio=5 tid=206 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
"HMaster-EventLoopGroup-1-3" daemon prio=10 tid=424 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-7c38871e-1" prio=5 tid=161 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-6-2" prio=10 tid=11449 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 33369" daemon prio=5 tid=68 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369" daemon prio=5 tid=136 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=201 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@78d40b" daemon prio=5 tid=61 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-13" daemon prio=5 tid=1291 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-20-thread-1" prio=5 tid=103 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-6" daemon prio=5 tid=1090 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Notification Thread" daemon prio=9 tid=13 runnable java.lang.Thread.State: RUNNABLE "qtp733149406-160" daemon prio=5 tid=160 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@4ffdcc14" daemon prio=5 tid=71 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 35215" daemon prio=5 tid=105 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-7" daemon prio=5 tid=265 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=57 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "pool-29-thread-1" prio=5 tid=137 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-6-thread-1" prio=5 tid=36 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-2" daemon prio=10 tid=502 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-5" daemon prio=5 tid=263 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2)" daemon prio=5 tid=165 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "IPC Client (243496960) connection to localhost/127.0.0.1:33369 from jenkins" daemon prio=5 tid=123 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "qtp1179866921-38" daemon prio=5 tid=38 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-1" daemon prio=10 tid=329 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=202 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-14" daemon prio=5 tid=272 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test.named-queue-events-pool-0" daemon prio=5 tid=288 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) at app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) at app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) at app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-1-hdfs://localhost:33369/user/jenkins/test-data/962a16a2-a4b8-b3f3-e9a9-f9c6ee781156/MasterData-prefix:9e5ab3b623d7,41513,1733180944669" daemon prio=5 tid=11430 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 46543" daemon prio=5 tid=141 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Socket Reader #1 for port 0" daemon prio=5 tid=95 runnable 
java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "RPCClient-NioEventLoopGroup-6-1" daemon prio=5 tid=515 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/9e5ab3b623d7:0.procedureResultReporter" daemon prio=5 tid=466 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "IPC Server handler 4 on default port 46543" daemon prio=5 tid=143 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "pool-1-thread-1" daemon prio=5 tid=14 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) at 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "main" prio=5 tid=1 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.dumpThreads(Native Method) at java.base@17.0.11/java.lang.Thread.getAllStackTraces(Thread.java:1671) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDump(TimedOutTestsListener.java:92) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDiagnosticString(TimedOutTestsListener.java:78) at app//org.apache.hadoop.hbase.TimedOutTestsListener.testFailure(TimedOutTestsListener.java:65) at app//org.junit.runner.notification.SynchronizedRunListener.testFailure(SynchronizedRunListener.java:94) at app//org.junit.runner.notification.RunNotifier$6.notifyListener(RunNotifier.java:177) at app//org.junit.runner.notification.RunNotifier$SafeNotifier.run(RunNotifier.java:72) at app//org.junit.runner.notification.RunNotifier.fireTestFailures(RunNotifier.java:173) at app//org.junit.runner.notification.RunNotifier.fireTestFailure(RunNotifier.java:167) at app//org.apache.maven.surefire.common.junit4.Notifier.fireTestFailure(Notifier.java:100) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:23) at app//org.junit.internal.runners.model.EachTestNotifier.addMultipleFailureException(EachTestNotifier.java:29) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:21) at app//org.junit.runners.ParentRunner.run(ParentRunner.java:419) at app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) at app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) at app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) at app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) at app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) at app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) "NIOWorkerThread-6" daemon prio=5 tid=264 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SnapshotHandlerChoreCleaner" daemon prio=5 tid=414 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41513" daemon prio=5 tid=278 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=178 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "DatanodeAdminMonitor-0" daemon prio=5 tid=62 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=97 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "MiniHBaseClusterRegionServer-EventLoopGroup-3-3" daemon prio=10 tid=510 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Async-Client-Retry-Timer-pool-0" daemon prio=5 tid=401 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-3" daemon prio=10 tid=389 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-11" daemon prio=5 tid=269 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1179866921-41-acceptor-0@6a03c20c-ServerConnector@921c3a7{HTTP/1.1, (http/1.1)}{localhost:39743}" daemon prio=3 tid=41 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp482364030-120-acceptor-0@69642822-ServerConnector@33120766{HTTP/1.1, (http/1.1)}{localhost:35343}" daemon prio=3 tid=120 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-15" daemon prio=5 tid=273 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-4" daemon prio=5 tid=1041 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp482364030-121" daemon prio=5 tid=121 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-12" daemon prio=5 tid=1287 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data3)" daemon prio=5 tid=162 timed_waiting 
java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "HMaster-EventLoopGroup-1-2" daemon prio=10 tid=423 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-4eb75e32-1" prio=5 tid=125 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-9" daemon prio=5 tid=267 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MutableQuantiles-0" daemon prio=5 tid=977 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp1179866921-39" daemon prio=5 tid=39 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@5aa85110[State = -1, empty queue]" daemon prio=5 tid=195 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-1" daemon prio=10 tid=256 runnable java.lang.Thread.State: RUNNABLE at 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 35215" daemon prio=5 tid=107 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-12" daemon prio=5 tid=270 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp733149406-156" daemon prio=5 tid=156 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-12-thread-1" prio=5 tid=69 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=41513" daemon prio=5 tid=284 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "Time-limited test-SendThread(127.0.0.1:57019)" daemon prio=5 tid=257 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) at app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) "NIOWorkerThread-16" daemon prio=5 tid=274 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "zk-permission-watcher-pool-0" daemon prio=5 tid=1084 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RegionServerTracker-0" daemon prio=5 tid=440 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-33-thread-1" daemon prio=5 tid=226 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-4" daemon prio=5 tid=1875 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-13" daemon prio=5 tid=271 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100" daemon prio=5 tid=35 
timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) "Time-limited test-EventThread" daemon prio=5 tid=258 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) "MiniHBaseClusterRegionServer-EventLoopGroup-4-2" daemon prio=10 tid=1071 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-7-thread-1" prio=5 tid=46 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-10" daemon prio=5 tid=1285 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369" daemon prio=5 tid=102 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "regionserver/9e5ab3b623d7:0.procedureResultReporter" daemon prio=5 tid=475 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data6/current/BP-217505668-172.17.0.3-1733180940242" daemon prio=5 tid=221 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=41513" daemon prio=5 tid=283 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Server idle connection scanner for port 38073" daemon prio=5 tid=200 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "MiniHBaseClusterRegionServer-EventLoopGroup-4-3" daemon prio=10 tid=1089 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Finalizer" daemon prio=8 tid=3 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) "IPC Server handler 2 on default port 38073" daemon prio=5 tid=210 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-15" daemon prio=5 tid=6626 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner" daemon prio=5 tid=23 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-7" daemon prio=5 tid=1233 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp482364030-119" daemon prio=5 tid=119 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007fc77842ac40.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@179eb2f2" daemon prio=5 tid=34 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at 
app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-6-1" prio=10 tid=196 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at
2024-12-02T23:21:58,536 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2da58674{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-43e3462e-1" prio=5 tid=45 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=101 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "Common-Cleaner" daemon prio=8 tid=12 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) at
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) at java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) "ConnnectionExpirer" daemon prio=5 tid=235 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) "IPC Server handler 4 on default port 38073" daemon prio=5 tid=212 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-2" daemon prio=5 tid=259 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-6-3" prio=10 tid=11450 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-1" daemon prio=10 tid=289 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at 
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"zk-event-processor-pool-0" daemon prio=5 tid=262 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-02T23:21:58,536 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41d5b04a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-02T23:21:58,536 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
"NIOWorkerThread-10" daemon prio=5 tid=268 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41513" daemon prio=5 tid=279 in Object.wait()
  java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
"IPC Server handler 0 on default port 38073" daemon prio=5 tid=208 terminated
  java.lang.Thread.State: TERMINATED
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
"master/9e5ab3b623d7:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=354 timed_waiting
  java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
"VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1)" daemon prio=5 tid=163 timed_waiting
  java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
"org.apache.hadoop.util.JvmPauseMonitor$Monitor@4c65ffca" daemon prio=5 tid=197 terminated
  java.lang.Thread.State: TERMINATED
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-02T23:21:58,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a229d4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-02T23:21:58,537 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3178d580{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/hadoop.log.dir/,STOPPED}
2024-12-02T23:21:58,538 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-02T23:21:58,538 WARN [BP-217505668-172.17.0.3-1733180940242 heartbeating to localhost/127.0.0.1:33369 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-217505668-172.17.0.3-1733180940242 (Datanode Uuid 46e97ca3-746a-4afe-806a-5271e62119a0) service to localhost/127.0.0.1:33369
2024-12-02T23:21:58,538 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-02T23:21:58,538 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-02T23:21:58,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data1/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T23:21:58,539 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/79da57b0-8cbd-cc14-ae02-464fcd0b3a77/cluster_580b686f-adc4-1d82-6054-b11486025bde/data/data2/current/BP-217505668-172.17.0.3-1733180940242 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-02T23:21:58,539 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func